import asyncio
import logging
import os
import re
from asyncio.exceptions import TimeoutError
from html import unescape
from time import time
from urllib.parse import unquote

import aiohttp
from aiohttp import ClientConnectorError

PROTOCOL = 'https://'
BASE_URL = 'telegram.org'
# it's necessary to help the crawler find more links
HIDDEN_URLS = {
    # 'corefork.telegram.org',

    'telegram.org/privacy/gmailbot',
    'telegram.org/tos',
    'telegram.org/tour',
    'telegram.org/evolution',

    'desktop.telegram.org/changelog',

    'instantview.telegram.org/rules',
}

BASE_URL_REGEX = r'telegram\.org'

# disable crawling sub links for specific domains and url patterns
CRAWL_RULES = {
    # every rule is a regex
    # an empty string matches any url
    # allow rules have a higher priority than deny rules (see the example below this dict)
    'translations.telegram.org': {
        'allow': {
            r'^[^/]*$',  # root
            r'org/[^/]*/$',  # 1 lvl sub
            r'/en/[a-z_]+/$'  # 1 lvl after /en/
        },
        'deny': {
            '',  # all
        }
    },
    'bugs.telegram.org': {  # crawl only the first page of cards sorted by rating
        'deny': {
            # r'/c/[0-9]+/[0-9]+',  # disable comments
            '',
        },
    },
    'instantview.telegram.org': {
        'deny': {
            'file/',
            r'templates/.+',
            'samples/',
            'contest',
        },
    },
    'core.telegram.org': {
        'deny': {
            'file/',
            'bots/payments',
            'tdlib/docs/classtd',
            'validatedRequestedInfo',
        },
    },
    'telegram.org': {
        'deny': {
            'file/',
            r'apps$'
        },
    },
    'webz.telegram.org': {
        'deny': {
            '',
        },
    },
    'webk.telegram.org': {
        'deny': {
            '',
        },
    },
}
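
# example (illustrative path): for 'translations.telegram.org/en/android/' the deny rule ''
# matches, but the allow rule r'/en/[a-z_]+/$' overrides it, so the page is crawled; a deeper
# path such as 'translations.telegram.org/en/android/unsorted/' stays excluded.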

DIRECT_LINK_REGEX = r'([-a-zA-Z0-9@:%._\+~#]{0,249}' + BASE_URL_REGEX + r')'
ABSOLUTE_LINK_REGEX = r'([-a-zA-Z0-9@:%._\+~#]{0,248}' + BASE_URL_REGEX + r'\b[-a-zA-Z0-9@:%_\+.~#?&//=]*)'
RELATIVE_LINK_REGEX = r'\/(?!\/)([-a-zA-Z0-9\/@:%._\+~#]{0,249})'
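
# roughly: DIRECT_LINK_REGEX captures only the (sub)domain, ABSOLUTE_LINK_REGEX also captures
# the path and query string, and RELATIVE_LINK_REGEX captures the path after a single '/'
# (its negative lookahead skips protocol-relative '//' links)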
DOM_ATTRS = ['href', 'src']
OUTPUT_FILENAME = os.environ.get('OUTPUT_FILENAME', 'tracked_links.txt')

# insecure but so simple
CONNECTOR = aiohttp.TCPConnector(ssl=False)
TIMEOUT = aiohttp.ClientTimeout(total=30)

logging.basicConfig(format='%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)

VISITED_LINKS = set()
LINKS_TO_TRACK = set()


def should_exclude(url: str) -> bool:
    direct_link = re.findall(DIRECT_LINK_REGEX, url)[0]
    domain_rules = CRAWL_RULES.get(direct_link)
    if not domain_rules:
        return False

    allow_rules = domain_rules.get('allow', set())
    deny_rules = domain_rules.get('deny', set())

    exclude = False

    for regex in deny_rules:
        if re.search(regex, url):
            exclude = True
            break

    for regex in allow_rules:
        if re.search(regex, url):
            exclude = False
            break

    return exclude
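
# e.g. should_exclude('telegram.org/file/abc') is True (the deny rule 'file/' matches),
# while should_exclude('telegram.org/blog') is False (no deny rule for that path matches)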


def find_absolute_links(html: str) -> set[str]:
    absolute_links = set(re.findall(ABSOLUTE_LINK_REGEX, html))

    return {link for link in absolute_links if not should_exclude(link)}


def find_relative_links(html: str, cur_link: str) -> set[str]:
    direct_cur_link = re.findall(DIRECT_LINK_REGEX, cur_link)[0]
    # optimization: if the current page itself matches an exclude rule, skip collecting its relative links
    if should_exclude(cur_link):
        return set()

    relative_links = set()
    for attr in DOM_ATTRS:
        regex = f'{attr}="{RELATIVE_LINK_REGEX}'
        links = re.findall(regex, html)

        for link in links:
            url = f'{direct_cur_link}/{link}'
            if not should_exclude(url):
                relative_links.add(url)

    return relative_links


def cleanup_links(links: set[str]) -> set[str]:
    cleaned_links = set()
    for tmp_link in links:
        # normalize link
        link = unquote(tmp_link)
        link = unescape(link)
        link = link.replace('www.', '')
        link = link.replace('http://', '').replace('https://', '')

        # skip anchor links
        if '#' in link:
            continue

        # remove get params from link
        if '?' in link:
            link = link.split('?')[0]

        # skip mailto:
        link_parts = link.split('.')
        if '@' in link_parts[0]:
            continue

        cleaned_links.add(link)

    return cleaned_links
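
# e.g. 'https://www.telegram.org/faq?setln=en' is normalized to 'telegram.org/faq';
# anchor links (containing '#') and mailto-style addresses are skipped entirely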


async def crawl(url: str, session: aiohttp.ClientSession):
    if url in VISITED_LINKS:
        return
    VISITED_LINKS.add(url)

    try:
        logger.info(f'[{len(VISITED_LINKS)}] Process {url}')
        async with session.get(f'{PROTOCOL}{url}', allow_redirects=False, timeout=TIMEOUT) as response:
            status_code = response.status
            content_type = response.headers.get('content-type')

            if status_code != 200:
                return

            if 'text/html' in content_type:
                LINKS_TO_TRACK.add(url)

                html = await response.text()
                absolute_links = cleanup_links(find_absolute_links(html))
                relative_links = cleanup_links(find_relative_links(html, url))

                sub_links = absolute_links | relative_links
                await asyncio.gather(*[crawl(url, session) for url in sub_links])
            elif 'application/javascript' in content_type:
                LINKS_TO_TRACK.add(url)
            elif 'text/css' in content_type:
                LINKS_TO_TRACK.add(url)
            elif 'application/json' in content_type:
                LINKS_TO_TRACK.add(url)
            else:
                # TODO track hashes of image/svg/video content types
                logger.info(f'Unhandled type: {content_type}')

            # telegram urls can work with and without a trailing slash (no redirect). P.S. not on every subdomain ;d
            # that's a problem: it is random which variant of the link gets added
            # this check resolves the issue: if both variants are available, prefer the one without the trailing slash
            without_trailing_slash = url[:-1] if url.endswith('/') else url
            if without_trailing_slash in LINKS_TO_TRACK and \
                    f'{without_trailing_slash}/' in LINKS_TO_TRACK:
                LINKS_TO_TRACK.remove(f'{without_trailing_slash}/')
    except UnicodeDecodeError:
        logger.warning('Codec can\'t decode byte, so it was a tgs file')
    except ClientConnectorError:
        logger.warning(f'Wrong link: {url}')
    except TimeoutError:
        logger.warning(f'Retrying {url}')
        VISITED_LINKS.remove(url)
        await asyncio.gather(crawl(url, session))


async def start(url_list: set[str]):
    async with aiohttp.ClientSession(connector=CONNECTOR) as session:
        await asyncio.gather(*[crawl(url, session) for url in url_list])


if __name__ == '__main__':
    HIDDEN_URLS.add(BASE_URL)

    logger.info('Start crawling links...')
    start_time = time()
    asyncio.get_event_loop().run_until_complete(start(HIDDEN_URLS))
    logger.info(f'Stop crawling links. {time() - start_time} sec.')

    try:
        with open(OUTPUT_FILENAME, 'r') as f:
            OLD_URL_LIST = {line.rstrip('\n') for line in f}

        logger.info(f'Is equal: {OLD_URL_LIST == LINKS_TO_TRACK}')
        logger.info(f'Deleted: {OLD_URL_LIST - LINKS_TO_TRACK}')
        logger.info(f'Added: {LINKS_TO_TRACK - OLD_URL_LIST}')
    except IOError:
        pass

    with open(OUTPUT_FILENAME, 'w') as f:
        f.write('\n'.join(sorted(LINKS_TO_TRACK)))