Merge branch 'master' of github.com:bMorgan01/SpellingSpider

This commit is contained in:
bMorgan01 2022-09-25 10:28:30 -06:00
commit 5b12550e9b
2 changed files with 49 additions and 37 deletions

View file

@@ -1,7 +1,5 @@
# Domain Ex: benrmorgan.com
benrmorgan.com
# Prefix Ex: http://www.
http://www.
# Target
http://www.benrmorgan.com
# Ignore urls containing Ex: /files/
/files/
/images/

54
main.py
View file

@@ -1,33 +1,53 @@
import datetime
import os
import re
from stat import S_ISFIFO
import sys
from urllib.parse import urlparse, urlunparse, urljoin
import bs4
from urllib.request import Request, urlopen
from os.path import exists
from shutil import move
import language_tool_python
import argparse
parser = argparse.ArgumentParser()
def spider(prefix, domain, exclude):
return spider_rec(dict(), prefix, domain, "/", exclude)
def spider(target, exclude):
    """Crawl the site rooted at *target*, skipping URLs that match *exclude*.

    Returns the mapping of path -> [page text, language tag] built by
    spider_rec.
    """
    return spider_rec({}, target, urlparse(target), exclude)
def spider_rec(page_texts, prefix, domain, postfix, exclude):
req = Request(prefix + domain + postfix)
def spider_rec(page_texts, current_href, base_parse, exclude):
target_url = urlunparse(base_parse)
parse_result = urlparse(urljoin(target_url, current_href))
req = Request(urlunparse(parse_result))
postfix = parse_result.path
if parse_result.query:
postfix += "?" + parse_result.query
if len(postfix) == 0:
postfix = "/"
if parse_result.hostname == base_parse.hostname:
html_page = urlopen(req)
soup = bs4.BeautifulSoup(html_page, "lxml")
page_texts[postfix] = [soup.getText(), soup.find_all('html')[0].get("lang")]
if page_texts[postfix][1] is None:
page_texts[postfix][1] = 'en-us'
for link in soup.findAll('a'):
href = link.get('href')
if "mailto:" not in href and (domain in href or href[0] == '/'):
href = href.replace(" ", "%20")
if "mailto:" not in href:
if not urlparse(href).hostname:
href_parse = urlparse(urljoin(target_url, href))
href = href_parse.path
if href_parse.query:
href += "?" + href_parse.query
if href not in page_texts.keys():
found = False
for d in exclude:
@@ -38,11 +58,7 @@ def spider_rec(page_texts, prefix, domain, postfix, exclude):
if found:
continue
href = href.replace(" ", "%20")
if domain in href:
spider_rec(page_texts, "", "", href, exclude)
else:
spider_rec(page_texts, prefix, domain, href, exclude)
spider_rec(page_texts, href, base_parse, exclude)
return page_texts
@@ -79,15 +95,13 @@ def main(report: bool):
line = line.replace("\r", "")
conf.append(line)
domain = conf[1]
prefix = conf[3]
ignores = conf[5:conf.index("# Custom Dictionary Ex: Strato")]
target = conf[1]
ignores = conf[3:conf.index("# Custom Dictionary Ex: Strato")]
custDict = conf[conf.index("# Custom Dictionary Ex: Strato") + 1::]
if not report:
print("Crawling site...")
links = spider(prefix, domain, ignores)
date = datetime.datetime.utcnow()
links = spider(target, ignores)
if not report:
print("Starting local language servers for")