#!/usr/bin/env python3
# Only external dependency: requests
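# Streams data from stdin to a file in an Internet Archive item via the IA S3 API, using multipart
# uploads so that large files can be uploaded without ever touching the local disk; at most one part
# (--part-size, default 100 MiB) per concurrent upload is buffered in memory. Supports resuming
# (--upload-id with --parts), aborting (--abort), and listing in-progress uploads (--list).
# A hypothetical invocation, assuming this file is saved as ia-upload-stream:
#   tar -c somedir | ./ia-upload-stream myitem somedir.tar collection:test_collection title:'Some directory'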
import argparse
import base64
import collections
import concurrent.futures
import configparser
import contextlib
import functools
import hashlib
import io
import itertools
import json
import logging
import os
import pprint
import re
import requests
import sys
import time
try:
    import tqdm
except ImportError:
    tqdm = None
import types
import urllib.parse


logger = logging.getLogger()
# Timeout used for everything except part uploads
TIMEOUT = 60


class UploadError(Exception):
    def __init__(self, message, r = None, uploadId = None, parts = None):
        self.message = message
        self.r = r
        self.uploadId = uploadId
        self.parts = parts


class PreventCompletionError(UploadError):
    'Raised in place of completing the upload when --no-complete is active'
def get_ia_access_secret(configFile = None):
    if 'IA_S3_ACCESS' in os.environ and 'IA_S3_SECRET' in os.environ:
        return os.environ['IA_S3_ACCESS'], os.environ['IA_S3_SECRET']
    if configFile is None:
        # This part of the code is identical (except for style changes) to the one in internetarchive and was written from scratch by JustAnotherArchivist in May and December 2021.
        candidates = []
        if os.environ.get('IA_CONFIG_FILE'):
            candidates.append(os.environ['IA_CONFIG_FILE'])
        xdgConfigHome = os.environ.get('XDG_CONFIG_HOME')
        if not xdgConfigHome or not os.path.isabs(xdgConfigHome) or not os.path.isdir(xdgConfigHome):
            # Per the XDG Base Dir specification, this should be $HOME/.config. Unfortunately, $HOME does not exist on all systems. Therefore, we use ~/.config here.
            # On a POSIX-compliant system, where $HOME must always be set, the XDG spec will be followed precisely.
            xdgConfigHome = os.path.join(os.path.expanduser('~'), '.config')
        candidates.append(os.path.join(xdgConfigHome, 'internetarchive', 'ia.ini'))
        candidates.append(os.path.join(os.path.expanduser('~'), '.config', 'ia.ini'))
        candidates.append(os.path.join(os.path.expanduser('~'), '.ia'))
        for candidate in candidates:
            if os.path.isfile(candidate):
                configFile = candidate
                break
        # (End of the identical code)
    elif not os.path.isfile(configFile):
        configFile = None
    if not configFile:
        raise RuntimeError('Could not find ia configuration file; did you run `ia configure`?')
    config = configparser.RawConfigParser()
    config.read(configFile)
    if 's3' not in config or 'access' not in config['s3'] or 'secret' not in config['s3']:
        raise RuntimeError('Could not read configuration; did you run `ia configure`?')
    access = config['s3']['access']
    secret = config['s3']['secret']
    return access, secret
def metadata_to_headers(metadata):
    # metadata is a dict or a list of 2-tuples.
    # Returns the headers for the IA S3 request as a dict.
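    # For example, {'collection': 'test_collection', 'external_identifier': 'foo'} becomes
    # {'x-archive-meta00-collection': 'uri(test_collection)', 'x-archive-meta00-external--identifier': 'uri(foo)'};
    # repeated keys get increasing counters (x-archive-meta00-subject, x-archive-meta01-subject, ...).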
    headers = {}
    counters = collections.defaultdict(int) # How often each metadata key has been seen
    if isinstance(metadata, dict):
        metadata = metadata.items()
    for key, value in metadata:
        headers[f'x-archive-meta{counters[key]:02d}-{key.replace("_", "--")}'] = f'uri({urllib.parse.quote(value.encode("utf-8"))})'
        counters[key] += 1
    return headers
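# Copy at most `size` bytes from fin to fout in chunks of up to blockSize bytes, stopping early on EOF.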
def readinto_size_limit(fin, fout, size, blockSize = 1048576):
    while size:
        d = fin.read(min(blockSize, size))
        if not d:
            break
        fout.write(d)
        size -= len(d)
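# Read the next part of up to partSize bytes from f into a BytesIO buffer (reusing _data when provided)
# and return a (buffer, size, base64-encoded MD5 digest) tuple; the digest is used as the Content-MD5 header.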
def get_part(f, partSize, progress, _data = None):
    if _data is not None:
        data = _data
        data.seek(0)
        data.truncate()
    else:
        data = io.BytesIO()
    with maybe_file_progress_bar(progress, data, 'write', 'reading input') as w:
        readinto_size_limit(f, w, partSize)
    data.seek(0)
    size = len(data.getbuffer())
    logger.info('Calculating MD5')
    h = hashlib.md5(data.getbuffer())
    logger.info(f'MD5: {h.hexdigest()}')
    contentMd5 = base64.b64encode(h.digest()).decode('ascii')
    return (data, size, contentMd5)
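# Context manager yielding a wrapper around file object f that reports progress for every read or write,
# using tqdm when it is installed and a plain stderr counter otherwise.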
@contextlib.contextmanager
def file_progress_bar(f, mode, description, size = None):
    if size is None:
        pos = f.tell()
        f.seek(0, io.SEEK_END)
        size = f.tell() - pos
        f.seek(pos, io.SEEK_SET)
    if tqdm is not None:
        with tqdm.tqdm(total = size, unit = 'iB', unit_scale = True, unit_divisor = 1024, desc = description) as t:
            wrappedFile = tqdm.utils.CallbackIOWrapper(t.update, f, mode)
            yield wrappedFile
    else:
        # Simple progress bar that just prints a new line with elapsed time and size in MiB on every read or write if it hasn't printed for at least a second
        processedSize = 0
        startTime = time.time()
        lastPrintTime = 0

        def _progress(inc):
            nonlocal processedSize, lastPrintTime
            processedSize += inc
            now = time.time()
            if now - lastPrintTime < 1:
                return
            proc = f'{processedSize / size * 100 :.0f}%, ' if size else ''
            of = f' of {size / 1048576 :.2f}' if size else ''
            print(f'\r{description}: {proc}{processedSize / 1048576 :.2f}{of} MiB, {now - startTime :.1f} s', end = '', file = sys.stderr)
            lastPrintTime = now

        class Wrapper:
            def __init__(self, wrapped):
                object.__setattr__(self, '_wrapped', wrapped)

            def __getattr__(self, name):
                return getattr(self._wrapped, name)

            def __setattr__(self, name, value):
                return setattr(self._wrapped, name, value)

        func = getattr(f, mode)

        @functools.wraps(func)
        def _readwrite(self, *args, **kwargs):
            nonlocal mode
            res = func(*args, **kwargs)
            if mode == 'write':
                data, args = args[0], args[1:]
            else:
                data = res
            _progress(len(data))
            return res

        wrapper = Wrapper(f)
        object.__setattr__(wrapper, mode, types.MethodType(_readwrite, wrapper))
        yield wrapper
        print(f'\r\x1b[Kdone {description}, {processedSize / 1048576 :.2f} MiB in {time.time() - startTime :.1f} seconds', file = sys.stderr) # EOL when it's done
@contextlib.contextmanager
def maybe_file_progress_bar(progress, f, *args, **kwargs):
    if progress:
        with file_progress_bar(f, *args, **kwargs) as r:
            yield r
    else:
        yield f
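# Upload a single buffer with HTTP PUT: one part of a multipart upload when partNumber is set, or the
# whole object when partNumber is falsy. Retries with backoff (min(3 ** attempt, 30) seconds) and
# returns (partNumber, ETag, data); the buffer is rewound so it can be reused.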
def upload_one(url, uploadId, partNumber, data, contentMd5, size, headers, progress, tries, timeout):
    r = None # For UploadError in case of a timeout
    if partNumber:
        url = f'{url}?partNumber={partNumber}&uploadId={uploadId}'
    for attempt in range(1, tries + 1):
        if attempt > 1:
            logger.info(f'Retrying part {partNumber}')
        try:
            with maybe_file_progress_bar(progress, data, 'read', f'uploading {partNumber}', size = size) as w:
                r = requests.put(url, headers = {**headers, 'Content-MD5': contentMd5}, data = w, timeout = timeout)
        except (ConnectionError, requests.exceptions.RequestException) as e:
            err = f'error {type(e).__module__}.{type(e).__name__} {e!s}'
        else:
            if r.status_code == 200:
                break
            err = f'status {r.status_code}'
        sleepTime = min(3 ** attempt, 30)
        retrying = f', retrying after {sleepTime} seconds' if attempt < tries else ''
        logger.error(f'Got {err} from IA S3 on uploading part {partNumber}{retrying}')
        if attempt == tries:
            raise UploadError(f'Got {err} from IA S3 on uploading part {partNumber}', r = r, uploadId = uploadId) # parts is added in wait_first
        time.sleep(sleepTime)
    data.seek(0)
    return partNumber, r.headers['ETag'], data
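# Block on the oldest pending upload future, record its (partNumber, ETag) in parts, and return its
# buffer for reuse by the next part.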
def wait_first(tasks, parts):
    task = tasks.popleft()
    done, _ = concurrent.futures.wait({task})
    assert task in done
    try:
        partNumber, eTag, data = task.result()
    except UploadError as e:
        # The upload task can't add an accurate parts list, so add that here and reraise
        e.parts = parts
        raise
    parts.append((partNumber, eTag))
    logger.info(f'Upload of part {partNumber} OK, ETag: {eTag}')
    return data
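# Stream stdin to `filename` in `item`. Data smaller than one part is sent with a single PUT; anything
# larger goes through the IA S3 multipart API. Passing uploadId and parts resumes a previous upload.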
def upload(item, filename, metadata, *, iaConfigFile = None, partSize = 100*1024*1024, tries = 3, partTimeout = None, concurrency = 1, queueDerive = True, keepOldVersion = True, complete = True, uploadId = None, parts = None, progress = True, sizeHint = None):
    f = sys.stdin.buffer

    # Read `ia` config
    access, secret = get_ia_access_secret(iaConfigFile)

    url = f'https://s3.us.archive.org/{item}/{filename}'
    headers = {'Authorization': f'LOW {access}:{secret}'}
    metadataHeaders = metadata_to_headers(metadata)
    initialHeaders = {**headers, 'x-amz-auto-make-bucket': '1', **metadataHeaders}
    if sizeHint:
        initialHeaders['x-archive-size-hint'] = str(sizeHint)
    extraHeaders = {'x-archive-queue-derive': '1' if queueDerive else '0', 'x-archive-keep-old-version': '1' if keepOldVersion else '0'}

    # Always read the first part
    data, size, contentMd5 = get_part(f, partSize, progress)

    # If the file is only a single part anyway, use the normal PUT API instead of multipart because IA can process that *much* faster.
    if uploadId is None and parts is None and complete and size < partSize:
        logger.info(f'Uploading in one piece ({size} bytes)')
        partNumber, eTag, _ = upload_one(url, None, 0, data, contentMd5, size, {**initialHeaders, **extraHeaders}, progress, tries, partTimeout)
        logger.info(f'Upload OK, ETag: {eTag}')
        logger.info('Done!')
        return

    if uploadId is None:
        # Initiate multipart upload
        logger.info(f'Initiating multipart upload for {filename} in {item}')
        r = requests.post(f'{url}?uploads', headers = initialHeaders, timeout = TIMEOUT)
        if r.status_code != 200:
            raise UploadError(f'Could not initiate multipart upload; got status {r.status_code} from IA S3', r = r)
        # Fight me!
        m = re.search(r'<uploadid>([^<]*)</uploadid>', r.text, re.IGNORECASE)
        if not m or not m[1]:
            raise UploadError('Could not find upload ID in IA S3 response', r = r)
        uploadId = m[1]
        logger.info(f'Got upload ID {uploadId}')

    # Wait for the item to exist; if the above created the item, it takes a little while for IA to actually create the bucket, and uploads would fail with a 404 until then.
    # Use four times the normal amount of retries because it frequently breaks...
    for attempt in range(1, 4 * tries + 1):
        logger.info(f'Checking for existence of {item}')
        r = requests.get(f'https://s3.us.archive.org/{item}/', headers = headers, timeout = TIMEOUT)
        if r.status_code == 200:
            break
        sleepTime = min(3 ** attempt, 30)
        retrying = f', retrying after {sleepTime} seconds' if attempt < 4 * tries else ''
        logger.error(f'Got status code {r.status_code} from IA S3 on checking for item existence{retrying}')
        if attempt == 4 * tries:
            raise UploadError('Item still does not exist', r = r, uploadId = uploadId, parts = parts)
        time.sleep(sleepTime)
    # Upload the data in parts
    if parts is None:
        parts = []
    tasks = collections.deque()
    with concurrent.futures.ThreadPoolExecutor(max_workers = concurrency) as executor:
        logger.info(f'Uploading part {len(parts) + 1} ({size} bytes)')
        task = executor.submit(upload_one, url, uploadId, len(parts) + 1, data, contentMd5, size, headers, progress, tries, partTimeout)
        tasks.append(task)
        for partNumber in itertools.count(start = len(parts) + 2):
            data = None
            while len(tasks) >= concurrency:
                data = wait_first(tasks, parts)
            data, size, contentMd5 = get_part(f, partSize, progress, _data = data)
            if not size:
                # We're done!
                break
            logger.info(f'Uploading part {partNumber} ({size} bytes)')
            task = executor.submit(upload_one, url, uploadId, partNumber, data, contentMd5, size, headers, progress, tries, partTimeout)
            tasks.append(task)
        while tasks:
            wait_first(tasks, parts)

    # If --no-complete is used, raise the special error to be caught in main for pretty printing.
    if not complete:
        logger.info('Not completing upload')
        raise PreventCompletionError('', uploadId = uploadId, parts = parts)

    # Complete upload
    logger.info('Completing upload')
    # FUCKING FIGHT ME!
    completeData = '<CompleteMultipartUpload>' + ''.join(f'<Part><PartNumber>{partNumber}</PartNumber><ETag>{etag}</ETag></Part>' for partNumber, etag in parts) + '</CompleteMultipartUpload>'
    completeData = completeData.encode('utf-8')
    for attempt in range(1, tries + 1):
        if attempt > 1:
            logger.info('Retrying completion request')
        r = requests.post(f'{url}?uploadId={uploadId}', headers = {**headers, **extraHeaders}, data = completeData, timeout = TIMEOUT)
        if r.status_code == 200:
            break
        retrying = f', retrying' if attempt < tries else ''
        logger.error(f'Could not complete upload; got status {r.status_code} from IA S3{retrying}')
        if attempt == tries:
            raise UploadError(f'Could not complete upload; got status {r.status_code} from IA S3', r = r, uploadId = uploadId, parts = parts)
    logger.info('Done!')
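# List the in-progress multipart uploads of an item via the '?uploads' endpoint, printing the initiation
# datetime, upload ID, and filename of each one.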
def list_uploads(item, *, tries = 3):
    # No auth needed
    url = f'https://s3.us.archive.org/{item}/?uploads'

    # This endpoint (sometimes? not anymore?) redirects to the server storing the item under ia######.s3dns.us.archive.org, but those servers present an invalid TLS certificate for *.us.archive.org.
    class IAS3CertificateFixHTTPAdapter(requests.adapters.HTTPAdapter):
        def init_poolmanager(self, *args, **kwargs):
            kwargs['assert_hostname'] = 's3.us.archive.org'
            return super().init_poolmanager(*args, **kwargs)

    for attempt in range(1, tries + 1):
        r = requests.get(url, allow_redirects = False, timeout = TIMEOUT)
        if r.status_code == 200 or (r.status_code == 307 and '.s3dns.us.archive.org' in r.headers['Location']):
            if r.status_code == 307:
                s3dnsUrl = r.headers['Location']
                s3dnsUrl = s3dnsUrl.replace('http://', 'https://')
                s3dnsUrl = s3dnsUrl.replace('.s3dns.us.archive.org:80/', '.s3dns.us.archive.org/')
                domain = s3dnsUrl[8:s3dnsUrl.find('/', 9)]
                s = requests.Session()
                s.mount(f'https://{domain}/', IAS3CertificateFixHTTPAdapter())
                r = s.get(s3dnsUrl, timeout = TIMEOUT)
            if r.status_code == 200:
                print(f'In-progress uploads for {item} (initiation datetime, upload ID, filename):')
                for upload in re.findall(r'<Upload>.*?</Upload>', r.text):
                    uploadId = re.search(r'<UploadId>(.*?)</UploadId>', upload).group(1)
                    filename = re.search(r'<Key>(.*?)</Key>', upload).group(1)
                    date = re.search(r'<Initiated>(.*?)</Initiated>', upload).group(1)
                    print(f'{date} {uploadId} {filename}')
                break
        retrying = f', retrying' if attempt < tries else ''
        logger.error(f'Could not list uploads; got status {r.status_code} from IA S3{retrying}')
        if attempt == tries:
            raise UploadError(f'Could not list uploads; got status {r.status_code} from IA S3', r = r)
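# Abort a multipart upload by DELETEing the upload ID; IA S3 answers with status 204 on success.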
def abort(item, filename, uploadId, *, iaConfigFile = None, tries = 3):
    # Read `ia` config
    access, secret = get_ia_access_secret(iaConfigFile)

    url = f'https://s3.us.archive.org/{item}/{filename}'
    headers = {'Authorization': f'LOW {access}:{secret}'}

    # Delete upload
    logger.info(f'Aborting upload {uploadId}')
    for attempt in range(1, tries + 1):
        if attempt > 1:
            logger.info('Retrying abort request')
        r = requests.delete(f'{url}?uploadId={uploadId}', headers = headers, timeout = TIMEOUT)
        if r.status_code == 204:
            break
        retrying = f', retrying' if attempt < tries else ''
        logger.error(f'Could not abort upload; got status {r.status_code} from IA S3{retrying}')
        if attempt == tries:
            raise UploadError(f'Could not abort upload; got status {r.status_code} from IA S3', r = r, uploadId = uploadId)
    logger.info('Done!')
def main():
    def metadata(x):
        if ':' not in x:
            raise ValueError
        return x.split(':', 1)

    def size(x):
        try:
            return int(x)
        except ValueError:
            pass
        if x.endswith('M'):
            return int(float(x[:-1]) * 1024 ** 2)
        elif x.endswith('G'):
            return int(float(x[:-1]) * 1024 ** 3)
        raise ValueError
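    # For example, size('100M') == 104857600 and size('2G') == 2147483648; a bare integer is taken as bytes.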
    def parts(x):
        try:
            o = json.loads(base64.b64decode(x))
        except json.JSONDecodeError as e:
            raise ValueError from e
        if not isinstance(o, list) or not all(isinstance(e, list) and len(e) == 2 for e in o):
            raise ValueError
        if [i for i, _ in o] != list(range(1, len(o) + 1)):
            raise ValueError
        return o
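    # The --parts value is the base64-encoded JSON list of [partNumber, eTag] pairs that a failed run
    # prints for resumption, e.g. the base64 encoding of '[[1,"etag1"],[2,"etag2"]]'; part numbers must
    # run from 1 to n without gaps.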
    parser = argparse.ArgumentParser()
    parser.add_argument('--part-size', '--partsize', dest = 'partSize', type = size, default = size('100M'), help = 'size of each chunk to buffer in memory and upload (default: 100M = 100 MiB)')
    parser.add_argument('--no-derive', dest = 'queueDerive', action = 'store_false', help = 'disable queueing a derive task')
    parser.add_argument('--clobber', dest = 'keepOldVersion', action = 'store_false', help = 'enable clobbering existing files')
    parser.add_argument('--ia-config-file', dest = 'iaConfigFile', metavar = 'FILE', help = 'path to the ia CLI config file (default: search the same paths as ia)')
    parser.add_argument('--tries', type = int, default = 3, metavar = 'N', help = 'retry on S3 errors (default: 3)')
    parser.add_argument('--timeout', type = float, default = None, metavar = 'SECONDS', help = 'timeout for part uploads (default: unlimited)')
    parser.add_argument('--concurrency', '--concurrent', type = int, default = 1, metavar = 'N', help = 'upload N parts in parallel (default: 1)')
    parser.add_argument('--no-complete', dest = 'complete', action = 'store_false', help = 'disable completing the upload when stdin is exhausted')
    parser.add_argument('--no-progress', dest = 'progress', action = 'store_false', help = 'disable progress bar')
    parser.add_argument('--size-hint', dest = 'sizeHint', type = size, help = "size hint for the total item size; only has an effect if the item doesn't exist yet")
    parser.add_argument('--upload-id', dest = 'uploadId', help = 'upload ID when resuming or aborting an upload')
    parser.add_argument('--parts', type = parts, help = 'previous parts data for resumption; can only be used with --upload-id')
    parser.add_argument('--abort', action = 'store_true', help = 'abort an upload; can only be used with --upload-id; most other options are ignored when this is used')
    parser.add_argument('--list', action = 'store_true', help = 'list in-progress uploads for item; most other options are ignored when this is used')
    parser.add_argument('item', help = 'identifier of the target item')
    parser.add_argument('filename', nargs = '?', help = 'filename under which to store the data in the item')
    parser.add_argument('metadata', nargs = '*', type = metadata, help = "metadata for the item in the form 'key:value'; only has an effect if the item doesn't exist yet")
    args = parser.parse_args()
    if (args.parts or args.abort) and not args.uploadId:
        parser.error('--parts and --abort can only be used together with --upload-id')
    if args.uploadId and (args.parts is not None) == bool(args.abort):
        parser.error('--upload-id requires exactly one of --parts and --abort')
    if args.abort and args.list:
        parser.error('--abort and --list cannot be used together')
    if not args.list and not args.filename:
        parser.error('filename is required when not using --list')

    logging.basicConfig(level = logging.INFO, format = '{asctime}.{msecs:03.0f} {levelname} {name} {message}', datefmt = '%Y-%m-%d %H:%M:%S', style = '{')
    try:
        if not args.abort and not args.list:
            upload(
                args.item,
                args.filename,
                args.metadata,
                iaConfigFile = args.iaConfigFile,
                partSize = args.partSize,
                tries = args.tries,
                partTimeout = args.timeout,
                concurrency = args.concurrency,
                queueDerive = args.queueDerive,
                keepOldVersion = args.keepOldVersion,
                complete = args.complete,
                uploadId = args.uploadId,
                parts = args.parts,
                progress = args.progress,
                sizeHint = args.sizeHint,
            )
        elif args.list:
            list_uploads(args.item, tries = args.tries)
        else:
            abort(
                args.item,
                args.filename,
                args.uploadId,
                iaConfigFile = args.iaConfigFile,
                tries = args.tries,
            )
    except (RuntimeError, UploadError) as e:
        if isinstance(e, PreventCompletionError):
            level = logging.INFO
            status = 0
        else:
            logger.exception('Unhandled exception raised')
            level = logging.WARNING
            status = 1
        if isinstance(e, UploadError):
            if e.r is not None:
                logger.info(pprint.pformat(vars(e.r.request)), exc_info = False)
                logger.info(pprint.pformat(vars(e.r)), exc_info = False)
            if e.uploadId:
                logger.log(level, f'Upload ID for resumption or abortion: {e.uploadId}', exc_info = False)
                parts = base64.b64encode(json.dumps(e.parts, separators = (',', ':')).encode('ascii')).decode('ascii')
                logger.log(level, f'Previous parts data for resumption: {parts}', exc_info = False)
        sys.exit(status)


if __name__ == '__main__':
    main()