diff --git a/crawl.conf b/crawl.conf
index f31dd40..0d3b639 100755
--- a/crawl.conf
+++ b/crawl.conf
@@ -1,7 +1,5 @@
-# Domain Ex: benrmorgan.com
-benrmorgan.com
-# Prefix Ex: http://www.
-http://www.
+# Target
+http://www.benrmorgan.com
 # Ignore urls containing Ex: /files/
 /files/
 /images/
diff --git a/main.py b/main.py
index 13c6fc1..efbc7aa 100755
--- a/main.py
+++ b/main.py
@@ -1,48 +1,64 @@
-import datetime
 import os
 import re
 from stat import S_ISFIFO
 import sys
+from urllib.parse import urlparse, urlunparse, urljoin
 import bs4
 from urllib.request import Request, urlopen
-from os.path import exists
-from shutil import move
 import language_tool_python
 import argparse
 
 parser = argparse.ArgumentParser()
 
-def spider(prefix, domain, exclude):
-    return spider_rec(dict(), prefix, domain, "/", exclude)
+
+def spider(target, exclude):
+    parsed_target = urlparse(target)
+    return spider_rec(dict(), target, parsed_target, exclude)
 
-def spider_rec(page_texts, prefix, domain, postfix, exclude):
-    req = Request(prefix + domain + postfix)
-    html_page = urlopen(req)
+def spider_rec(page_texts, current_href, base_parse, exclude):
+    target_url = urlunparse(base_parse)
+    parse_result = urlparse(urljoin(target_url, current_href))
+    req = Request(urlunparse(parse_result))
 
-    soup = bs4.BeautifulSoup(html_page, "lxml")
+    postfix = parse_result.path
+    if parse_result.query:
+        postfix += "?" + parse_result.query
 
-    page_texts[postfix] = [soup.getText(), soup.find_all('html')[0].get("lang")]
-    if page_texts[postfix][1] is None:
-        page_texts[postfix][1] = 'en-us'
+    if len(postfix) == 0:
+        postfix = "/"
 
-    for link in soup.findAll('a'):
-        href = link.get('href')
-        if "mailto:" not in href and (domain in href or href[0] == '/'):
-            if href not in page_texts.keys():
-                found = False
-                for d in exclude:
-                    if d in href:
-                        found = True
-                        break
+    if parse_result.hostname == base_parse.hostname:
+        html_page = urlopen(req)
+        soup = bs4.BeautifulSoup(html_page, "lxml")
+        page_texts[postfix] = [soup.getText(), soup.find_all('html')[0].get("lang")]
 
-                if found:
-                    continue
+        if page_texts[postfix][1] is None:
+            page_texts[postfix][1] = 'en-us'
 
-                href = href.replace(" ", "%20")
-                if domain in href:
-                    spider_rec(page_texts, "", "", href, exclude)
-                else:
-                    spider_rec(page_texts, prefix, domain, href, exclude)
+        for link in soup.findAll('a'):
+            href = link.get('href')
+            href = href.replace(" ", "%20")
+
+            if "mailto:" not in href:
+                if not urlparse(href).hostname:
+                    href_parse = urlparse(urljoin(target_url, href))
+                    href = href_parse.path
+
+                    if href_parse.query:
+                        href += "?" + href_parse.query
+
+
+                if href not in page_texts.keys():
+                    found = False
+                    for d in exclude:
+                        if d in href:
+                            found = True
+                            break
+
+                    if found:
+                        continue
+
+                    spider_rec(page_texts, href, base_parse, exclude)
 
     return page_texts
 
@@ -79,15 +95,13 @@ def main(report: bool):
         line = line.replace("\r", "")
         conf.append(line)
 
-    domain = conf[1]
-    prefix = conf[3]
-    ignores = conf[5:conf.index("# Custom Dictionary Ex: Strato")]
+    target = conf[1]
+    ignores = conf[3:conf.index("# Custom Dictionary Ex: Strato")]
     custDict = conf[conf.index("# Custom Dictionary Ex: Strato") + 1::]
 
     if not report:
         print("Crawling site...")
-    links = spider(prefix, domain, ignores)
-    date = datetime.datetime.utcnow()
+    links = spider(target, ignores)
 
     if not report:
         print("Starting local language servers for")
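
A quick standalone sketch (not part of the patch) of the URL normalization the reworked `spider_rec` relies on: `urljoin` resolves each href against the configured target, the path-plus-query string becomes the `page_texts` key, and the hostname comparison keeps the crawl on-site. The example hrefs here are made up for illustration.

```python
from urllib.parse import urljoin, urlparse

target = "http://www.benrmorgan.com"  # the value under "# Target" in crawl.conf
base_parse = urlparse(target)

# Hypothetical hrefs a crawled page might contain.
for href in ["/about", "contact.html?lang=en",
             "http://www.benrmorgan.com/blog", "http://example.org/elsewhere"]:
    parse_result = urlparse(urljoin(target, href))

    # Rebuild the page_texts key the same way spider_rec does.
    postfix = parse_result.path
    if parse_result.query:
        postfix += "?" + parse_result.query
    if len(postfix) == 0:
        postfix = "/"

    # Only same-host URLs are actually fetched.
    on_site = parse_result.hostname == base_parse.hostname
    print(f"{href!r} -> key {postfix!r}, fetched: {on_site}")
```

This is also why crawl.conf no longer needs separate domain and prefix entries: a single absolute target URL carries both, and `urlparse` recovers the hostname for the on-site check.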