import os
from datetime import datetime, timedelta
from queue import Queue
from urllib.parse import urlparse, urljoin

import requests
from bs4 import BeautifulSoup

import indexing  # local helper module that creates/opens the Whoosh index
# Returns True only if the time since the URL was last crawled is greater than
# the cooldown period we set further down; URLs not in the index yet always qualify.
def time_passed(link):
    reader = ix.reader()
    try:
        link_time = reader.stored_fields(reader.first_id("url", link))["date"]
        time_difference = datetime.now() - link_time
        return time_difference > crawl_cooldown
    except Exception:  # the URL is not in the index, so it was never crawled
        return True
    finally:
        reader.close()
ix = indexing.get_index()
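The indexing module itself is not shown here. Below is a minimal sketch of what its get_index() helper could look like, assuming a Whoosh schema; the field names (title, url, content, date) are taken from how the crawler uses them, but the actual module may differ. Declaring url as a unique ID field is what lets update_document() further down replace a stale entry instead of adding a duplicate.

# indexing.py -- hypothetical sketch, not necessarily the post's actual module
import os
from whoosh import index
from whoosh.fields import Schema, TEXT, ID, DATETIME

def get_index(dirname="indexdir"):
    # url is the unique key: update_document() replaces any older entry for the same URL
    schema = Schema(title=TEXT(stored=True),
                    url=ID(stored=True, unique=True),
                    content=TEXT(stored=True),
                    date=DATETIME(stored=True))
    if not index.exists_in(dirname):
        os.makedirs(dirname, exist_ok=True)
        return index.create_in(dirname, schema)
    return index.open_dir(dirname)

The stored date field in this schema is also what time_passed() reads back to decide whether a page's cooldown has expired.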
crawl_cooldown = timedelta(days=1)  # minimum time before the same URL is crawled again
depth_limit = 2  # limit how many link levels deep we crawl
ALLOWED_DOMAINS = ('en.wikipedia.org',)  # assumption: domains the crawler may follow
wikipedia = True  # assumption: toggles the Wikipedia-specific link extraction below
visited_list = []  # URLs already queued during this run

queue = Queue()
queue.put(('https://en.wikipedia.org/wiki/Artificial_neural_network', 0))  # initialize the queue with the start URL at depth 0
while not queue.empty():
    current_url, depth = queue.get()  # take the first URL out of the queue

    # extract the website content
    r = requests.get(current_url, timeout=3)
    soup = BeautifulSoup(r.content, 'html.parser')
    urltitle = str(soup.title.string)
    request_time = datetime.now()

    # add the page to the index; because url is unique, this replaces stale entries
    writer = ix.writer()
    writer.update_document(title=urltitle, url=r.url, content=str(soup.text), date=request_time)
    writer.commit()

    print(f"Queue length: {queue.qsize()}, searching on depth: {depth}, site: {r.url}")
    if wikipedia:  # Wikipedia-specific: article links live inside the bodyContent div
        new_links = soup.find(id="bodyContent").find_all('a', href=True, limit=20)  # first 20 links in the content body
        for link in new_links:  # for each of the links on the current page
            href = urljoin(current_url, link['href'])  # resolve relative links against the current URL
            url = urlparse(href)
            # queue the URL only if it has the right format, lies inside the allowed domains,
            # has not been crawled recently, and is not a fragment URL
            if os.path.splitext(url.path)[1] in ('.html', '.htm', '') and url.hostname in ALLOWED_DOMAINS and href not in visited_list and time_passed(href) and "#" not in href: