#!/usr/bin/env python3
# wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
# sudo sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list'
# apt update; apt install google-chrome-beta
import json
import os
import re
import sys
import time
from datetime import datetime
from subprocess import run, PIPE, DEVNULL
### SETTINGS

INDEX_TEMPLATE = 'index_template.html'

# toggles for each archive method; disable the ones you don't need
FETCH_WGET = True
FETCH_PDF = True
FETCH_SCREENSHOT = True
RESOLUTION = '1440,900'           # screenshot resolution (passed to chrome --window-size)
FETCH_FAVICON = True
SUBMIT_ARCHIVE_DOT_ORG = True
ARCHIVE_PERMISSIONS = '755'       # chmod mode applied to archive output files/dirs
CHROME_BINARY = 'google-chrome'   # change to chromium-browser if using chromium
WGET_BINARY = 'wget'
def check_dependencies():
    """Verify the external binaries required by the enabled fetchers exist.

    Prints a pointer to the README and exits with status 1 if any required
    dependency is missing or (for chrome) too old for headless mode.
    """
    print('[*] Checking Dependencies:')

    if FETCH_PDF or FETCH_SCREENSHOT:
        # stdout=DEVNULL so `which`'s path output doesn't pollute the report
        if run(['which', CHROME_BINARY], stdout=DEVNULL).returncode:
            print('[X] Missing dependency: {}'.format(CHROME_BINARY))
            print('    See https://github.com/pirate/bookmark-archiver for help.')
            raise SystemExit(1)

        # parse chrome --version e.g. Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04
        result = run([CHROME_BINARY, '--version'], stdout=PIPE)
        version = result.stdout.decode('utf-8').replace('Google Chrome ', '').replace('Chromium ', '').split(' ', 1)[0].split('.', 1)[0]  # TODO: regex might be better
        if int(version) < 59:
            print('[X] Chrome version must be 59 or greater for headless PDF and screenshot saving')
            print('    See https://github.com/pirate/bookmark-archiver for help.')
            raise SystemExit(1)

    if FETCH_WGET:
        # use the configurable WGET_BINARY (was hard-coded 'wget')
        if run(['which', WGET_BINARY], stdout=DEVNULL).returncode:
            print('[X] Missing dependency: wget')
            print('    See https://github.com/pirate/bookmark-archiver for help.')
            raise SystemExit(1)

    if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG:
        if run(['which', 'curl'], stdout=DEVNULL).returncode:
            print('[X] Missing dependency: curl')
            print('    See https://github.com/pirate/bookmark-archiver for help.')
            raise SystemExit(1)
### PARSING READER LIST EXPORTS

def get_link_type(link):
    """Classify a link dict by its url so fetchers can skip inapplicable methods.

    Returns 'PDF', 'image', 'wiki', 'youtube', or None for a generic page.
    """
    if link['base_url'].endswith('.pdf'):
        return 'PDF'
    # compare the extension itself: rsplit() returns a *list*, so the original
    # `rsplit('.', 1) in ('pdf', ...)` membership test could never match
    elif link['base_url'].rsplit('.', 1)[-1].lower() in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'):
        return 'image'
    elif 'wikipedia.org' in link['domain']:
        return 'wiki'
    elif 'youtube.com' in link['domain']:
        return 'youtube'
    return None
def parse_pocket_export(html_file):
    """Yield link dicts parsed from a Pocket/Read-It-Later HTML export."""
    html_file.seek(0)
    # see sample input in ./example_ril_export.html
    pattern = re.compile("^\\s*<li><a href=\"(.+)\" time_added=\"(\\d+)\" tags=\"(.*)\">(.+)</a></li>", re.UNICODE)
    for line in html_file:
        match = pattern.search(line)
        if not match:
            continue
        # remove old readability prefixes to get original url
        fixed_url = match.group(1).replace('http://www.readability.com/read?url=', '')
        without_scheme = fixed_url.replace('http://', '').replace('https://', '')
        info = {
            'url': fixed_url,
            'domain': without_scheme.split('/')[0],    # without pathname
            'base_url': without_scheme.split('?')[0],  # without query args
            'time': datetime.fromtimestamp(int(match.group(2))).strftime('%Y-%m-%d %H:%M'),
            'timestamp': match.group(2),
            'tags': match.group(3),
            'title': match.group(4).replace(' — Readability', '').replace('http://www.readability.com/read?url=', '') or without_scheme,
        }
        info['type'] = get_link_type(info)
        yield info
def parse_json_export(json_file):
    """Yield link dicts parsed from a pinboard JSON export."""
    json_file.seek(0)
    json_content = json.load(json_file)
    for erg in json_content:
        if not erg:
            continue
        epoch = time.mktime(time.strptime(erg['time'].split(',')[0], '%Y-%m-%dT%H:%M:%SZ'))
        info = {
            'url': erg['href'],
            'domain': erg['href'].replace('http://', '').replace('https://', '').split('/')[0],
            'base_url': erg['href'].replace('https://', '').replace('http://', '').split('?')[0],
            # keep 'time' a formatted string and 'timestamp' a str, matching the
            # other parsers: next_uniq_timestamp() does string operations
            # ("'.' in t", '{}.{}'.format) that crash on the float mktime() returns
            'time': datetime.fromtimestamp(epoch).strftime('%Y-%m-%d %H:%M'),
            'timestamp': str(int(epoch)),
            'tags': erg['tags'],
            'title': erg['description'].replace(' — Readability', ''),
        }
        info['type'] = get_link_type(info)
        yield info
def parse_bookmarks_export(html_file):
    """Yield link dicts parsed from a Netscape-format browser bookmarks export."""
    html_file.seek(0)
    pattern = re.compile("<a href=\"(.+?)\" add_date=\"(\\d+)\"[^>]*>(.+)</a>", re.UNICODE | re.IGNORECASE)
    for line in html_file:
        match = pattern.search(line)
        if match:
            url = match.group(1)
            secs = match.group(2)
            dt = datetime.fromtimestamp(int(secs))
            info = {
                'url': url,
                'domain': url.replace('http://', '').replace('https://', '').split('/')[0],
                'base_url': url.replace('https://', '').replace('http://', '').split('?')[0],
                # format as a string for consistency with the other parsers
                # (the index template renders this value directly)
                'time': dt.strftime('%Y-%m-%d %H:%M'),
                'timestamp': secs,
                'tags': "",
                'title': match.group(3),
            }
            info['type'] = get_link_type(info)
            yield info
### ARCHIVING FUNCTIONS
def fetch_wget(out_dir, link, overwrite=False):
    """Download the full site (page plus requisites) into out_dir using wget.

    Skips the download if a directory for the link's domain already exists,
    unless overwrite is True. Best-effort: exceptions are printed, not raised.
    """
    domain = link['base_url'].split('/', 1)[0]
    if not os.path.exists('{}/{}'.format(out_dir, domain)) or overwrite:
        print('    - Downloading Full Site')
        CMD = [
            # use the configurable WGET_BINARY (was hard-coded 'wget')
            WGET_BINARY,
            *'--no-clobber --page-requisites --adjust-extension --convert-links --no-parent'.split(' '),
            link['url'],
        ]
        try:
            run(CMD, stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20)  # dom.html
        except Exception as e:
            print('      Exception: {} {}'.format(e.__class__.__name__, e))
    else:
        print('    √ Skipping site download')
def fetch_pdf(out_dir, link, overwrite=False):
    """Print the page to out_dir/output.pdf using headless Chrome.

    Skipped when the output already exists (unless overwrite) or the link is
    itself a PDF/image. Best-effort: exceptions are printed, not raised.
    """
    if (not os.path.exists('{}/output.pdf'.format(out_dir)) or overwrite) and link['type'] not in ('PDF', 'image'):
        print('    - Printing PDF')
        chrome_args = '--headless --disable-gpu --print-to-pdf'.split(' ')
        try:
            run([CHROME_BINARY, *chrome_args, link['url']], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20)  # output.pdf
            # chmod must also run in out_dir (was missing cwd=, so it targeted
            # a nonexistent output.pdf in the script's working directory)
            if run(['chmod', ARCHIVE_PERMISSIONS, 'output.pdf'], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=5).returncode:
                raise Exception('Failed to print PDF')
        except Exception as e:
            print('      Exception: {} {}'.format(e.__class__.__name__, e))
    else:
        print('    √ Skipping PDF print')
def fetch_screenshot(out_dir, link, overwrite=False):
    """Save a screenshot of the page to out_dir/screenshot.png using headless Chrome.

    Skipped when the output already exists (unless overwrite) or the link is
    itself a PDF/image. Best-effort: exceptions are printed, not raised.
    """
    if (not os.path.exists('{}/screenshot.png'.format(out_dir)) or overwrite) and link['type'] not in ('PDF', 'image'):
        print('    - Snapping Screenshot')
        chrome_args = '--headless --disable-gpu --screenshot'.split(' ')
        try:
            run([CHROME_BINARY, *chrome_args, '--window-size={}'.format(RESOLUTION), link['url']], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20)  # screenshot.png
            # chmod must also run in out_dir (was missing cwd=, so it targeted
            # a nonexistent screenshot.png in the script's working directory)
            if run(['chmod', ARCHIVE_PERMISSIONS, 'screenshot.png'], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=5).returncode:
                raise Exception('Failed to take screenshot')
        except Exception as e:
            print('      Exception: {} {}'.format(e.__class__.__name__, e))
    else:
        print('    √ Skipping screenshot')
def archive_dot_org(out_dir, link, overwrite=False):
    """Submit the link to the Wayback Machine and record the archived URL.

    Writes the resulting wayback URL to out_dir/archive.org.txt; skipped when
    that file already exists unless overwrite is True.
    """
    if os.path.exists('{}/archive.org.txt'.format(out_dir)) and not overwrite:
        print('    √ Skipping archive.org')
        return

    print('    - Submitting to archive.org')
    submit_url = 'https://web.archive.org/save/{}'.format(link['url'].split('?', 1)[0])

    saved_url = None  # sentinel: set only on success
    try:
        result = run(['curl', '-I', submit_url], stdout=PIPE, stderr=DEVNULL, cwd=out_dir, timeout=20)  # archive.org
        headers = result.stdout.splitlines()
        content_location = [h for h in headers if b'Content-Location: ' in h]
        if not content_location:
            raise Exception('Failed to find Content-Location URL in Archive.org response headers.')
        # the Content-Location header carries the path of the saved snapshot
        archive_path = content_location[0].split(b'Content-Location: ', 1)[-1].decode('utf-8')
        saved_url = 'https://web.archive.org{}'.format(archive_path)
    except Exception as e:
        print('      Exception: {} {}'.format(e.__class__.__name__, e))

    if saved_url is not None:
        with open('{}/archive.org.txt'.format(out_dir), 'w') as f:
            f.write(saved_url)
def fetch_favicon(out_dir, link, overwrite=False):
    """Fetch the site's favicon via Google's favicon service into out_dir.

    Skipped when favicon.ico already exists unless overwrite is True.
    Best-effort: exceptions are printed, not raised.
    """
    if not os.path.exists('{}/favicon.ico'.format(out_dir)) or overwrite:
        print('    - Fetching Favicon')
        CMD = 'curl https://www.google.com/s2/favicons?domain={domain}'.format(**link).split(' ')
        # favicons are binary data: open in 'wb' (was text-mode 'w'), and use a
        # context manager so the handle is closed even if run() raises
        with open('{}/favicon.ico'.format(out_dir), 'wb') as fout:
            try:
                run(CMD, stdout=fout, stderr=DEVNULL, cwd=out_dir, timeout=20)  # favicon.ico
            except Exception as e:
                print('      Exception: {} {}'.format(e.__class__.__name__, e))
    else:
        print('    √ Skipping favicon')
### ORCHESTRATION

def next_uniq_timestamp(used_timestamps, timestamp):
    """Return timestamp unchanged, or with a '.N' suffix if it's already taken."""
    if timestamp not in used_timestamps:
        return timestamp

    # continue counting from an existing '.N' suffix when one is present
    base, dot, suffix = timestamp.partition('.')
    nonce = int(suffix) if dot else 1

    candidate = '{}.{}'.format(base, nonce)
    while candidate in used_timestamps:
        nonce += 1
        candidate = '{}.{}'.format(base, nonce)
    return candidate
def uniquefied_links(links):
    """uniqueify link timestamps by de-duping using url, returns links sorted most recent -> oldest

    needed because firefox will produce exports where many links share the same timestamp, this func
    ensures that all non-duplicate links have monotonically increasing timestamps"""
    links = list(reversed(sorted(links, key=lambda l: (l['timestamp'], l['url']))))
    seen_timestamps = {}
    for link in links:
        ts = link['timestamp']
        previous = seen_timestamps.get(ts)
        if previous is not None:
            if previous['url'] == link['url']:
                # same link seen twice: no new unique timestamp needed
                continue
            # different link, same timestamp: append a decimal to disambiguate
            link['timestamp'] = next_uniq_timestamp(seen_timestamps, ts)
        seen_timestamps[link['timestamp']] = link
    return links
def valid_links(links):
    """Filter out chrome://, about:, mailto:, and other non-archivable urls."""
    return (link for link in links if link['url'].startswith(('http', 'ftp')))
def dump_index(links, service):
    """Render service/index.html: one table row per link, inserted into INDEX_TEMPLATE."""
    with open(INDEX_TEMPLATE, 'r') as f:
        index_html = f.read()

    # row template; placeholders are filled from each link dict
    link_html = """\
    <tr>
        <td>{time}</td>
        <td><a href="archive/{timestamp}/{base_url}" style="font-size:1.4em;text-decoration:none;color:black;" title="{title}">
            <img src="archive/{timestamp}/favicon.ico">
            {title} <small style="background-color: #eee;border-radius:4px; float:right">{tags}</small>
        </td>
        <td style="text-align:center"><a href="archive/{timestamp}/" title="Files">📂</a></td>
        <td style="text-align:center"><a href="{pdf_link}" title="PDF">📄</a></td>
        <td style="text-align:center"><a href="{screenshot_link}" title="Screenshot">🖼</a></td>
        <td style="text-align:center"><a href="https://web.archive.org/web/{base_url}" title="Archive.org">🏛</a></td>
        <td>🔗 <img src="https://www.google.com/s2/favicons?domain={domain}" height="16px"> <a href="{url}">{url}</a></td>
    </tr>"""

    def get_template_vars(link):
        # since we dont screenshot or PDF links that are images or PDFs, change those links to point to the wget'ed file
        link_info = {**link}
        if link['type']:
            link_info.update({'title': '{title} ({type})'.format(**link)})
        if link['type'] in ('PDF', 'image'):
            link_info.update({
                'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link),
                'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link),
            })
        else:
            link_info.update({
                'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link),
                'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link),
            })
        return link_info

    with open(''.join((service, '/index.html')), 'w') as f:
        article_rows = '\n'.join(
            link_html.format(**get_template_vars(link)) for link in links
        )
        f.write(index_html.format(datetime.now().strftime('%Y-%m-%d %H:%M'), article_rows))
def dump_website(link, service, overwrite=False):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""
    print('[+] [{timestamp} ({time})] "{title}": {base_url}'.format(**link))

    out_dir = ''.join((service, '/archive/{timestamp}')).format(**link)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    run(['chmod', ARCHIVE_PERMISSIONS, out_dir], timeout=5)

    if link['type']:
        print('    i Type: {}'.format(link['type']))

    # accept ftp:// as well as http(s):// for consistency with valid_links(),
    # which deliberately admits ftp urls (the bare 'http' check skipped them)
    if not link['url'].startswith(('http', 'ftp')):
        print('    X Skipping: invalid link.')
        return

    if FETCH_WGET:
        fetch_wget(out_dir, link, overwrite=overwrite)
    if FETCH_PDF:
        fetch_pdf(out_dir, link, overwrite=overwrite)
    if FETCH_SCREENSHOT:
        fetch_screenshot(out_dir, link, overwrite=overwrite)
    if SUBMIT_ARCHIVE_DOT_ORG:
        archive_dot_org(out_dir, link, overwrite=overwrite)
    if FETCH_FAVICON:
        fetch_favicon(out_dir, link, overwrite=overwrite)
def create_archive(export_file, service=None, resume=None):
    """Parse an export file and archive every link it contains.

    export_file: path to a pocket/pinboard/bookmarks export file
    service:     parser to use ('pocket', 'pinboard', 'bookmarks'); autodetected when None
    resume:      timestamp to resume downloading from (links older than it are skipped)

    Raises SystemExit(1) when no links can be parsed or a dependency is missing.
    """
    with open(export_file, 'r', encoding='utf-8') as f:
        print('[+] [{}] Starting archive from {} export file.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), export_file))

        if service == "pocket":
            links = parse_pocket_export(f)
        elif service == "pinboard":
            links = parse_json_export(f)
        elif service == "bookmarks":
            links = parse_bookmarks_export(f)
        else:
            # try all parsers until one works
            try:
                links = list(parse_json_export(f))
                service = 'pinboard'
            except Exception:
                links = list(parse_pocket_export(f))
                if links:
                    service = 'pocket'
                else:
                    links = list(parse_bookmarks_export(f))
                    service = 'bookmarks'

        links = valid_links(links)       # remove chrome://, about:, mailto: etc.
        links = uniquefied_links(links)  # fix duplicate timestamps, returns sorted list

        if resume:
            try:
                links = [link for link in links if float(link['timestamp']) >= float(resume)]
            except (TypeError, ValueError):
                # float() raises ValueError (not TypeError) for non-numeric
                # strings, so both must be caught here
                print('Resume value and all timestamp values must be valid numbers.')

        if not links:
            print('[X] No links found in {}, is it a {} export file?'.format(export_file, service))
            raise SystemExit(1)

        if not os.path.exists(service):
            os.makedirs(service)

        if not os.path.exists(''.join((service, '/archive'))):
            os.makedirs(''.join((service, '/archive')))

        dump_index(links, service)

        run(['chmod', '-R', ARCHIVE_PERMISSIONS, service], timeout=30)

        print('[*] [{}] Created archive index with {} links.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), len(links)))

        check_dependencies()

        for link in links:
            dump_website(link, service)

        print('[√] [{}] Archive complete.'.format(datetime.now()))
if __name__ == '__main__':
    # CLI: archive.py [export_file] [export_type] [resume_timestamp]
    args = sys.argv[1:]

    export_file = args[0] if len(args) > 0 else "ril_export.html"  # path to export file
    export_type = args[1] if len(args) > 1 else None               # select export_type for file format select
    resume_from = args[2] if len(args) > 2 else None               # timestamp to resume dowloading from

    create_archive(export_file, export_type, resume=resume_from)