import os

from functools import wraps
from datetime import datetime
from subprocess import run, PIPE, DEVNULL

from index import html_appended_url, parse_json_link_index, write_link_index
from links import links_after_timestamp
from config import (
    ARCHIVE_DIR,
    CHROME_BINARY,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    RESOLUTION,
    SUBMIT_ARCHIVE_DOT_ORG,
    FETCH_AUDIO,
    FETCH_VIDEO,
    FETCH_FAVICON,
    WGET_USER_AGENT,
    CHROME_USER_DATA_DIR,
    TIMEOUT,
    ANSI,
)
from util import (
    check_dependencies,
    progress,
    chmod_file,
)


_RESULTS_TOTALS = {  # globals are bad, mmkay
    'skipped': 0,
    'succeeded': 0,
    'failed': 0,
}


def archive_links(archive_path, links, source=None, resume=None):
    check_dependencies()

    # materialize the generator up front so len() reports the true total below
    to_archive = list(links_after_timestamp(links, resume))
    try:
        for idx, link in enumerate(to_archive):
            link_dir = os.path.join(archive_path, link['timestamp'])
            archive_link(link_dir, link)

    except (KeyboardInterrupt, SystemExit, Exception) as e:
        print('{red}[X] Index is up-to-date, archive update paused on link {idx}/{total}{reset}'.format(
            **ANSI,
            idx=idx,
            total=len(to_archive),
        ))
        print(' Continue where you left off by running:')
        print(' ./archive.py {} {}'.format(
            source,
            link['timestamp'],
        ))
        if not isinstance(e, KeyboardInterrupt):
            raise e
        raise SystemExit(1)
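
# Illustrative call, assuming an archive dir and a parsed links list (the
# values below are examples, not part of this module):
#
#   links = [{'url': 'https://example.com', 'timestamp': '1509343200', ...}, ...]
#   archive_links(os.path.join(ARCHIVE_DIR, 'archive'), links, source='bookmarks.html')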


def archive_link(link_dir, link, overwrite=False):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    update_existing = os.path.exists(link_dir)
    if update_existing:
        link = {
            **parse_json_link_index(link_dir),
            **link,
        }
    else:
        os.makedirs(link_dir)

    log_link_archive(link_dir, link, update_existing)

    if FETCH_WGET:
        link = fetch_wget(link_dir, link, overwrite=overwrite)

    if FETCH_PDF:
        link = fetch_pdf(link_dir, link, overwrite=overwrite)

    if FETCH_SCREENSHOT:
        link = fetch_screenshot(link_dir, link, overwrite=overwrite)

    if SUBMIT_ARCHIVE_DOT_ORG:
        link = archive_dot_org(link_dir, link, overwrite=overwrite)

    # if FETCH_AUDIO:
    #     link = fetch_audio(link_dir, link, overwrite=overwrite)

    # if FETCH_VIDEO:
    #     link = fetch_video(link_dir, link, overwrite=overwrite)

    if FETCH_FAVICON:
        link = fetch_favicon(link_dir, link, overwrite=overwrite)

    write_link_index(link_dir, link)

    return link
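
# A link folder produced by archive_link typically ends up looking like this
# (a sketch; exact contents depend on which FETCH_* flags are enabled, and the
# index filenames are assumed from index.py):
#
#   archive/<timestamp>/
#       index.json           # written by write_link_index
#       <domain>/...         # wget mirror of the page
#       output.pdf
#       screenshot.png
#       archive.org.txt
#       favicon.ico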


def log_link_archive(link_dir, link, update_existing):
    print('[{symbol_color}{symbol}{reset}] [{timestamp}] "{title}": {blue}{base_url}{reset}'.format(
        symbol='*' if update_existing else '+',
        symbol_color=ANSI['black' if update_existing else 'green'],
        **link,
        **ANSI,
    ))
    if link['type']:
        print(' i Type: {}'.format(link['type']))

    print(' {} ({})'.format(link_dir, 'updating' if update_existing else 'creating'))
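
# Example log output (illustrative values):
#
#   [+] [1509343200] "Example Page": example.com/page
#    i Type: PDF
#    /path/to/archive/1509343200 (creating)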


def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize methods and history json fields on link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, don't run the fetch function
            if link['latest'][method] and not overwrite:
                print(' √ Skipping: {}'.format(method))
                result = None
            else:
                print(' - Fetching: {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            elif isinstance(result.get('output'), Exception):
                history_entry['status'] = 'failed'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
            else:
                history_entry['status'] = 'succeeded'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
                link['latest'][method] = result['output']

            _RESULTS_TOTALS[history_entry['status']] += 1

            return link
        return timed_fetch_func
    return decorator
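
# Contract sketch for decorated fetchers (illustrative):
#
#   @attach_result_to_link('wget')
#   def fetch_wget(link_dir, link, **kwargs):
#       ...
#       return {'cmd': CMD, 'output': 'index.html'}  # or {'output': some_exception}
#
# The wrapper records each attempt under link['history']['wget'] and, on
# success, stores the output filename in link['latest']['wget'].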


@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, timeout=TIMEOUT):
    """download full site using wget"""

    if os.path.exists(os.path.join(link_dir, link['domain'])):
        return {'output': html_appended_url(link), 'status': 'skipped'}

    CMD = [
        *'wget --timestamping --adjust-extension --no-parent'.split(' '),  # Docs: https://www.gnu.org/software/wget/manual/wget.html
        *(('--page-requisites', '--convert-links') if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        link['url'],
    ]
    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # index.html
        end()
        output = html_appended_url(link)
        if result.returncode > 0:
            print(' got wget response code {}:'.format(result.returncode))
            print('\n'.join(' ' + line for line in (result.stderr or result.stdout).decode().rsplit('\n', 10)[-10:] if line.strip()))
            # raise Exception('Failed to wget download')
    except Exception as e:
        end()
        print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
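
# Example of the expanded command (illustrative values):
#
#   wget --timestamping --adjust-extension --no-parent \
#        --page-requisites --convert-links \
#        --user-agent=Mozilla/5.0 ... https://example.com/page
#
# No shell is involved (subprocess.run gets an argument list), so the
# user-agent value needs no surrounding quotes.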


@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print PDF of site to file using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': html_appended_url(link)}

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return {'output': 'output.pdf', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--print-to-pdf',
        link['url'],
    ]
    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # output.pdf
        end()
        if result.returncode:
            print(' ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to print PDF')
        output = 'output.pdf'
    except Exception as e:
        end()
        print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR, resolution=RESOLUTION):
    """take screenshot of site using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': html_appended_url(link)}

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return {'output': 'screenshot.png', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--screenshot',
        '--window-size={}'.format(resolution),
        link['url'],
    ]
    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # screenshot.png
        end()
        if result.returncode:
            print(' ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to take screenshot')
        chmod_file('screenshot.png', cwd=link_dir)
        output = 'screenshot.png'
    except Exception as e:
        end()
        print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    path = os.path.join(link_dir, 'archive.org.txt')
    if os.path.exists(path):
        archive_org_url = open(path, 'r').read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'].split('?', 1)[0])

    success = False
    CMD = ['curl', '-I', submit_url]
    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # archive.org.txt
        end()

        # Parse archive.org response headers
        headers = result.stdout.splitlines()
        content_location = [h for h in headers if b'Content-Location: ' in h]
        errors = [h for h in headers if h and b'X-Archive-Wayback-Runtime-Error: ' in h]

        if content_location:
            archive_path = content_location[0].split(b'Content-Location: ', 1)[-1].decode('utf-8')
            saved_url = 'https://web.archive.org{}'.format(archive_path)
            success = True

        elif len(errors) == 1 and b'RobotAccessControlException' in errors[0]:
            output = submit_url
            # raise Exception('Archive.org denied by {}/robots.txt'.format(link['domain']))
        elif errors:
            raise Exception(', '.join(e.decode() for e in errors))
        else:
            raise Exception('Failed to find "Content-Location" URL header in Archive.org response.')
    except Exception as e:
        end()
        print(' Visit url to see output:', ' '.join(CMD))
        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    if success:
        with open(os.path.join(link_dir, 'archive.org.txt'), 'w', encoding='utf-8') as f:
            f.write(saved_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = saved_url

    return {
        'cmd': CMD,
        'output': output,
    }
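
# The curl -I response is scanned for headers like these (illustrative values):
#
#   Content-Location: /web/20171030120000/https://example.com/page
#   X-Archive-Wayback-Runtime-Error: RobotAccessControlException: ...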


@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return {'output': 'favicon.ico', 'status': 'skipped'}

    CMD = ['curl', 'https://www.google.com/s2/favicons?domain={domain}'.format(**link)]
    fout = open('{}/favicon.ico'.format(link_dir), 'w')
    end = progress(timeout, prefix=' ')
    try:
        run(CMD, stdout=fout, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # favicon.ico
        fout.close()
        end()
        chmod_file('favicon.ico', cwd=link_dir)
        output = 'favicon.ico'
    except Exception as e:
        fout.close()
        end()
        print(' Run to see full output:', ' '.join(CMD))
        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


# @attach_result_to_link('audio')
# def fetch_audio(link_dir, link, timeout=TIMEOUT):
#     """Download audio rip using youtube-dl"""
#
#     if link['type'] not in ('soundcloud',)\
#        and 'audio' not in link['tags']:
#         return
#
#     path = os.path.join(link_dir, 'audio')
#
#     if not os.path.exists(path) or overwrite:
#         print(' - Downloading audio')
#         CMD = [
#             "youtube-dl -x --audio-format mp3 --audio-quality 0 -o '%(title)s.%(ext)s'",
#             link['url'],
#         ]
#         end = progress(timeout, prefix=' ')
#         try:
#             result = run(CMD, stdout=DEVNULL, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # audio/audio.mp3
#             end()
#             if result.returncode:
#                 print(' ', result.stderr.decode())
#                 raise Exception('Failed to download audio')
#             chmod_file('audio.mp3', cwd=link_dir)
#             return 'audio.mp3'
#         except Exception as e:
#             end()
#             print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
#             print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
#             raise
#     else:
#         print(' √ Skipping audio download')


# @attach_result_to_link('video')
# def fetch_video(link_dir, link, timeout=TIMEOUT):
#     """Download video rip using youtube-dl"""
#
#     if link['type'] not in ('youtube', 'youku', 'vimeo')\
#        and 'video' not in link['tags']:
#         return
#
#     path = os.path.join(link_dir, 'video')
#
#     if not os.path.exists(path) or overwrite:
#         print(' - Downloading video')
#         CMD = [
#             "youtube-dl -x --video-format mp4 --audio-quality 0 -o '%(title)s.%(ext)s'",
#             link['url'],
#         ]
#         end = progress(timeout, prefix=' ')
#         try:
#             result = run(CMD, stdout=DEVNULL, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # video/movie.mp4
#             end()
#             if result.returncode:
#                 print(' ', result.stderr.decode())
#                 raise Exception('Failed to download video')
#             chmod_file('video.mp4', cwd=link_dir)
#             return 'video.mp4'
#         except Exception as e:
#             end()
#             print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
#             print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
#             raise
#     else:
#         print(' √ Skipping video download')


def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR):
    args = [binary, '--headless', '--disable-gpu']
    default_profile = os.path.expanduser('~/Library/Application Support/Google/Chrome/Default')
    if user_data_dir:
        args.append('--user-data-dir={}'.format(user_data_dir))
    elif os.path.exists(default_profile):
        args.append('--user-data-dir={}'.format(default_profile))
    return args
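
# Example result (illustrative, assuming a macOS machine with no explicit
# CHROME_USER_DATA_DIR configured):
#
#   ['google-chrome', '--headless', '--disable-gpu',
#    '--user-data-dir=/Users/me/Library/Application Support/Google/Chrome/Default']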