"""
In Bookmark Archiver, a Link represents a single entry that we track in the
json index. All links pass through all archiver functions and the latest,
most up-to-date canonical output for each is stored in "latest".

Link {
    timestamp: str,     (how we uniquely id links)    _   _  _ _  ___
    url: str,                                        | \ / \ |\| ' |
    base_url: str,                                   |_/ \_/ | |   |
    domain: str,                                      _  _ _ _ _  _
    tags: str,                                       |_) /| |\| | / `
    type: str,                                       |  /"| | | | \_,
    title: str,                                          ,-'"`-.
    sources: [str],                                  /// /  @ @  \ \\\\
    latest: {                                        \ :=| ,._,. |=:  /
        ...,                                         || ,\   \_../ /. ||
        pdf: 'output.pdf',                           ||','`-._))'`.`||
        wget: 'example.com/1234/index.html'          `-'     (/    `-'
    },
    history: {
        ...
        pdf: [
            {timestamp: 15444234325, status: 'skipped', result='output.pdf'},
            ...
        ],
        wget: [
            {timestamp: 11534435345, status: 'succeeded', result='donuts.com/eat/them.html'}
        ]
    },
}
"""
import datetime
from html import unescape

from util import (
    domain,
    base_url,
    str_between,
    get_link_type,
    merge_links,
    wget_output_path,
)
from config import ANSI


def validate_links(links):
    links = archivable_links(links)  # remove chrome://, about:, mailto: etc.
    links = uniquefied_links(links)  # merge/dedupe duplicate timestamps & urls
    links = sorted_links(links)      # deterministically sort the links based on timestamp, url

    if not links:
        print('[X] No links found :(')
        raise SystemExit(1)

    for link in links:
        # titles are stored HTML-escaped in the index; some sources have no
        # title at all, so guard before unescaping
        link['title'] = unescape(link['title']) if link.get('title') else None
        link['latest'] = link.get('latest') or {}

        # the wget output path is derivable from the url, so it can always be filled in
        if not link['latest'].get('wget'):
            link['latest']['wget'] = wget_output_path(link)

        if not link['latest'].get('pdf'):
            link['latest']['pdf'] = None

        if not link['latest'].get('screenshot'):
            link['latest']['screenshot'] = None

    return list(links)
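# Minimal usage sketch (hypothetical input; real links come from the bookmark
# parsers and already carry a timestamp):
#
#   raw = [{'url': 'https://example.com', 'timestamp': '1508252472',
#           'title': 'Example', 'tags': '', 'type': None, 'sources': []}]
#   links = validate_links(raw)   # cleaned, deduped, sorted newest-first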
def archivable_links(links):
    """remove chrome://, about:, or other schemed links that can't be archived"""
    return (
        link
        for link in links
        if any(link['url'].startswith(s) for s in ('http://', 'https://', 'ftp://'))
    )
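# e.g. (illustrative): a chrome:// url is dropped, an https:// url survives:
#
#   list(archivable_links([{'url': 'chrome://version'}, {'url': 'https://example.com'}]))
#   # -> [{'url': 'https://example.com'}]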
def uniquefied_links(sorted_links):
    """
    merge links that point at the same page (by fuzzy url match), then make
    sure every remaining link ends up with a unique timestamp
    """

    unique_urls = {}

    # normalize urls so trivial variants (case, www., trailing slash) compare equal
    lower = lambda url: url.lower().strip()
    without_www = lambda url: url.replace('://www.', '://', 1)
    without_trailing_slash = lambda url: url[:-1] if url.endswith('/') else url.replace('/?', '?')

    for link in sorted_links:
        fuzzy_url = without_www(without_trailing_slash(lower(link['url'])))
        if fuzzy_url in unique_urls:
            # merge with any other links that share the same url
            link = merge_links(unique_urls[fuzzy_url], link)
        unique_urls[fuzzy_url] = link

    unique_timestamps = {}
    for link in unique_urls.values():
        link['timestamp'] = lowest_uniq_timestamp(unique_timestamps, link['timestamp'])
        unique_timestamps[link['timestamp']] = link

    return unique_timestamps.values()
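# e.g. (illustrative): these two entries collapse into one merged link, since
# both normalize to the same fuzzy url 'https://example.com':
#
#   uniquefied_links([
#       {'url': 'https://example.com/', 'timestamp': '1508252472', ...},
#       {'url': 'https://www.example.com', 'timestamp': '1508252473', ...},
#   ])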
def sorted_links(links):
    # newest-first, with url as a tiebreaker so the ordering is deterministic
    sort_func = lambda link: (link['timestamp'], link['url'])
    return sorted(links, key=sort_func, reverse=True)
def links_after_timestamp(links, timestamp=None):
    """yield the links that still need processing when resuming from a given timestamp"""
    if not timestamp:
        yield from links
        return

    for link in links:
        try:
            # links are sorted newest-first, so resuming "after" a timestamp
            # means yielding every link with a timestamp at or below it
            if float(link['timestamp']) <= float(timestamp):
                yield link
        except (ValueError, TypeError):
            print('Resume value and all timestamp values must be valid numbers.')
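# e.g. (illustrative): resuming at '1508252472' with links sorted newest-first
# skips everything newer and re-yields the rest:
#
#   links = [{'timestamp': '1508252480'}, {'timestamp': '1508252472'}, {'timestamp': '1508252470'}]
#   list(links_after_timestamp(links, '1508252472'))   # -> the last two links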
def lowest_uniq_timestamp(used_timestamps, timestamp):
    """resolve duplicate timestamps by appending a decimal: 1234, 1234, 1234 -> 1234, 1234.0, 1234.1"""

    timestamp = timestamp.split('.')[0]
    nonce = 0

    # first try 152323423 before 152323423.0
    if timestamp not in used_timestamps:
        return timestamp

    new_timestamp = '{}.{}'.format(timestamp, nonce)
    while new_timestamp in used_timestamps:
        nonce += 1
        new_timestamp = '{}.{}'.format(timestamp, nonce)

    return new_timestamp
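# e.g.: the first occurrence keeps the bare timestamp, and later collisions
# get the lowest unused decimal suffix:
#
#   lowest_uniq_timestamp({}, '1234')                            # -> '1234'
#   lowest_uniq_timestamp({'1234': ...}, '1234')                 # -> '1234.0'
#   lowest_uniq_timestamp({'1234': ..., '1234.0': ...}, '1234')  # -> '1234.1'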