The little things give you away... A collection of various small helper stuff
#!/usr/bin/env python3
# Only external dependency: requests
import argparse
import asyncio
import base64
import collections
import concurrent.futures
import configparser
import contextlib
import functools
import hashlib
import io
import itertools
import json
import logging
import os
import pprint
import re
import requests
import sys
import time
try:
    import tqdm
except ImportError:
    tqdm = None
import types


logger = logging.getLogger()

class UploadError(Exception):
    def __init__(self, message, r = None, uploadId = None, parts = None):
        self.message = message
        self.r = r
        self.uploadId = uploadId
        self.parts = parts


class PreventCompletionError(UploadError):
    'Raised in place of completing the upload when --no-complete is active'

def get_ia_access_secret(configFile = None):
    if configFile is None:
        # This part of the code is identical (except for style changes) to the one in internetarchive and was written from scratch by JustAnotherArchivist in May and December 2021.
        candidates = []
        if os.environ.get('IA_CONFIG_FILE'):
            candidates.append(os.environ['IA_CONFIG_FILE'])
        xdgConfigHome = os.environ.get('XDG_CONFIG_HOME')
        if not xdgConfigHome or not os.path.isabs(xdgConfigHome) or not os.path.isdir(xdgConfigHome):
            # Per the XDG Base Dir specification, this should be $HOME/.config. Unfortunately, $HOME does not exist on all systems. Therefore, we use ~/.config here.
            # On a POSIX-compliant system, where $HOME must always be set, the XDG spec will be followed precisely.
            xdgConfigHome = os.path.join(os.path.expanduser('~'), '.config')
        candidates.append(os.path.join(xdgConfigHome, 'internetarchive', 'ia.ini'))
        candidates.append(os.path.join(os.path.expanduser('~'), '.config', 'ia.ini'))
        candidates.append(os.path.join(os.path.expanduser('~'), '.ia'))
        for candidate in candidates:
            if os.path.isfile(candidate):
                configFile = candidate
                break
        # (End of the identical code)
    elif not os.path.isfile(configFile):
        configFile = None
    if not configFile:
        raise RuntimeError('Could not find ia configuration file; did you run `ia configure`?')
    config = configparser.RawConfigParser()
    config.read(configFile)
    if 's3' not in config or 'access' not in config['s3'] or 'secret' not in config['s3']:
        raise RuntimeError('Could not read configuration; did you run `ia configure`?')
    access = config['s3']['access']
    secret = config['s3']['secret']
    return access, secret

def metadata_to_headers(metadata):
    # metadata is a dict or a list of 2-tuples.
    # Returns the headers for the IA S3 request as a dict.
    headers = {}
    counters = collections.defaultdict(int) # How often each metadata key has been seen
    if isinstance(metadata, dict):
        metadata = metadata.items()
    for key, value in metadata:
        headers[f'x-archive-meta{counters[key]:02d}-{key.replace("_", "--")}'] = value.encode('utf-8')
        counters[key] += 1
    return headers
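
# For illustration (made-up metadata values): metadata_to_headers({'title': 'Test', 'mediatype': 'data'})
# returns {'x-archive-meta00-title': b'Test', 'x-archive-meta00-mediatype': b'data'}; repeated keys get
# increasing counters (meta00, meta01, ...), and underscores in keys are written as '--'.
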
def readinto_size_limit(fin, fout, size, blockSize = 1048576):
    while size:
        d = fin.read(min(blockSize, size))
        if not d:
            break
        fout.write(d)
        size -= len(d)

@contextlib.contextmanager
def file_progress_bar(f, mode, description, size = None):
    if size is None:
        pos = f.tell()
        f.seek(0, io.SEEK_END)
        size = f.tell() - pos
        f.seek(pos, io.SEEK_SET)
    if tqdm is not None:
        with tqdm.tqdm(total = size, unit = 'iB', unit_scale = True, unit_divisor = 1024, desc = description) as t:
            wrappedFile = tqdm.utils.CallbackIOWrapper(t.update, f, mode)
            yield wrappedFile
    else:
        # Simple progress bar that just prints a new line with elapsed time and size in MiB on every read or write
        processedSize = 0
        startTime = time.time()
        def _progress(inc):
            nonlocal processedSize
            processedSize += inc
            proc = f'{processedSize / size * 100 :.0f}%, ' if size else ''
            of = f' of {size / 1048576 :.2f}' if size else ''
            print(f'\r{description}: {proc}{processedSize / 1048576 :.2f}{of} MiB, {time.time() - startTime :.1f} s', end = '', file = sys.stderr)
        class Wrapper:
            def __init__(self, wrapped):
                object.__setattr__(self, '_wrapped', wrapped)
            def __getattr__(self, name):
                return getattr(self._wrapped, name)
            def __setattr__(self, name, value):
                return setattr(self._wrapped, name, value)
        func = getattr(f, mode)
        @functools.wraps(func)
        def _readwrite(self, *args, **kwargs):
            nonlocal mode
            res = func(*args, **kwargs)
            if mode == 'write':
                data, args = args[0], args[1:]
            else:
                data = res
            _progress(len(data))
            return res
        wrapper = Wrapper(f)
        object.__setattr__(wrapper, mode, types.MethodType(_readwrite, wrapper))
        yield wrapper
        print(f'\rdone {description}, {processedSize / 1048576 :.2f} MiB in {time.time() - startTime :.1f} seconds', file = sys.stderr) # EOL when it's done

@contextlib.contextmanager
def maybe_file_progress_bar(progress, f, *args, **kwargs):
    if progress:
        with file_progress_bar(f, *args, **kwargs) as r:
            yield r
    else:
        yield f

def upload_one(url, uploadId, partNumber, data, contentMd5, size, headers, progress, tries):
    r = None  # Ensures r exists for the UploadError below if the first attempt fails before a response is received
    for attempt in range(1, tries + 1):
        if attempt > 1:
            logger.info(f'Retrying part {partNumber}')
        try:
            with maybe_file_progress_bar(progress, data, 'read', f'uploading {partNumber}', size = size) as w:
                r = requests.put(f'{url}?partNumber={partNumber}&uploadId={uploadId}', headers = {**headers, 'Content-MD5': contentMd5}, data = w)
        except (ConnectionError, requests.exceptions.RequestException) as e:
            err = f'error {type(e).__module__}.{type(e).__name__} {e!s}'
        else:
            if r.status_code == 200:
                break
            err = f'status {r.status_code}'
        sleepTime = min(3 ** attempt, 30)
        retrying = f', retrying after {sleepTime} seconds' if attempt < tries else ''
        logger.error(f'Got {err} from IA S3 on uploading part {partNumber}{retrying}')
        if attempt == tries:
            raise UploadError(f'Got {err} from IA S3 on uploading part {partNumber}', r = r, uploadId = uploadId) # parts is added in wait_first
        time.sleep(sleepTime)
        data.seek(0)
    return partNumber, r.headers['ETag']

async def wait_first(tasks, parts):
    task = tasks.popleft()
    try:
        partNumber, eTag = await task
    except UploadError as e:
        # The upload task can't add an accurate parts list, so add that here and reraise
        e.parts = parts
        raise
    parts.append((partNumber, eTag))
    logger.info(f'Upload of part {partNumber} OK, ETag: {eTag}')

async def upload(item, filename, metadata, *, iaConfigFile = None, partSize = 100*1024*1024, tries = 3, concurrency = 1, queueDerive = True, keepOldVersion = True, complete = True, uploadId = None, parts = None, progress = True):
    f = sys.stdin.buffer

    # Read `ia` config
    access, secret = get_ia_access_secret(iaConfigFile)

    url = f'https://s3.us.archive.org/{item}/{filename}'
    headers = {'Authorization': f'LOW {access}:{secret}'}

    if uploadId is None:
        # Initiate multipart upload
        logger.info(f'Initiating multipart upload for {filename} in {item}')
        metadataHeaders = metadata_to_headers(metadata)
        r = requests.post(f'{url}?uploads', headers = {**headers, 'x-amz-auto-make-bucket': '1', **metadataHeaders})
        if r.status_code != 200:
            raise UploadError(f'Could not initiate multipart upload; got status {r.status_code} from IA S3', r = r)
        # Fight me!
        m = re.search(r'<uploadid>([^<]*)</uploadid>', r.text, re.IGNORECASE)
        if not m or not m[1]:
            raise UploadError('Could not find upload ID in IA S3 response', r = r)
        uploadId = m[1]
    logger.info(f'Got upload ID {uploadId}')

    # Upload the data in parts
    if parts is None:
        parts = []
    tasks = collections.deque()
    loop = asyncio.get_event_loop()
    with concurrent.futures.ThreadPoolExecutor(max_workers = concurrency) as executor:
        for partNumber in itertools.count(start = len(parts) + 1):
            while len(tasks) >= concurrency:
                await wait_first(tasks, parts)
            data = io.BytesIO()
            with maybe_file_progress_bar(progress, data, 'write', 'reading input') as w:
                readinto_size_limit(f, w, partSize)
            data.seek(0)
            size = len(data.getbuffer())
            if not size:
                # We're done!
                break
            logger.info(f'Uploading part {partNumber} ({size} bytes)')
            logger.info('Calculating MD5')
            h = hashlib.md5(data.getbuffer())
            logger.info(f'MD5: {h.hexdigest()}')
            contentMd5 = base64.b64encode(h.digest()).decode('ascii')
            task = loop.run_in_executor(executor, upload_one, url, uploadId, partNumber, data, contentMd5, size, headers, progress, tries)
            tasks.append(task)
        while tasks:
            await wait_first(tasks, parts)

    # If --no-complete is used, raise the special error to be caught in main for pretty printing.
    if not complete:
        logger.info('Not completing upload')
        raise PreventCompletionError('', uploadId = uploadId, parts = parts)

    # Complete upload
    logger.info('Completing upload')
    # FUCKING FIGHT ME!
    completeData = '<CompleteMultipartUpload>' + ''.join(f'<Part><PartNumber>{partNumber}</PartNumber><ETag>{etag}</ETag></Part>' for partNumber, etag in parts) + '</CompleteMultipartUpload>'
    completeData = completeData.encode('utf-8')
    extraHeaders = {'x-archive-queue-derive': '1' if queueDerive else '0', 'x-archive-keep-old-version': '1' if keepOldVersion else '0'}
    for attempt in range(1, tries + 1):
        if attempt > 1:
            logger.info('Retrying completion request')
        r = requests.post(f'{url}?uploadId={uploadId}', headers = {**headers, **extraHeaders}, data = completeData)
        if r.status_code == 200:
            break
        retrying = ', retrying' if attempt < tries else ''
        logger.error(f'Could not complete upload; got status {r.status_code} from IA S3{retrying}')
        if attempt == tries:
            raise UploadError(f'Could not complete upload; got status {r.status_code} from IA S3', r = r, uploadId = uploadId, parts = parts)
    logger.info('Done!')

def abort(item, filename, uploadId, *, iaConfigFile = None, tries = 3):
    # Read `ia` config
    access, secret = get_ia_access_secret(iaConfigFile)

    url = f'https://s3.us.archive.org/{item}/{filename}'
    headers = {'Authorization': f'LOW {access}:{secret}'}

    # Delete upload
    logger.info(f'Aborting upload {uploadId}')
    for attempt in range(1, tries + 1):
        if attempt > 1:
            logger.info('Retrying abort request')
        r = requests.delete(f'{url}?uploadId={uploadId}', headers = headers)
        if r.status_code == 204:
            break
        retrying = ', retrying' if attempt < tries else ''
        logger.error(f'Could not abort upload; got status {r.status_code} from IA S3{retrying}')
        if attempt == tries:
            raise UploadError(f'Could not abort upload; got status {r.status_code} from IA S3', r = r, uploadId = uploadId)
    logger.info('Done!')

def main():
    def metadata(x):
        if ':' not in x:
            raise ValueError
        return x.split(':', 1)

    def size(x):
        try:
            return int(x)
        except ValueError:
            pass
        if x.endswith('M'):
            return int(x[:-1]) * 1024 ** 2
        elif x.endswith('G'):
            return int(x[:-1]) * 1024 ** 3
        raise ValueError

    def parts(x):
        try:
            o = json.loads(base64.b64decode(x))
        except json.JSONDecodeError as e:
            raise ValueError from e
        if not isinstance(o, list) or not all(isinstance(e, list) and len(e) == 2 for e in o):
            raise ValueError
        if [i for i, _ in o] != list(range(1, len(o) + 1)):
            raise ValueError
        return o
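
    # Illustrative note (made-up eTag values): a valid --parts value is the base64 encoding of a JSON
    # list of [partNumber, eTag] pairs with part numbers counting up from 1, e.g. the output of
    # base64.b64encode(json.dumps([[1, 'etag1'], [2, 'etag2']]).encode('ascii')); this is the same
    # format logged as "Previous parts data for resumption" when an upload fails.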
    parser = argparse.ArgumentParser()
    parser.add_argument('--partsize', dest = 'partSize', type = size, default = size('100M'), help = 'size of each chunk to buffer in memory and upload (default: 100M = 100 MiB)')
    parser.add_argument('--no-derive', dest = 'queueDerive', action = 'store_false', help = 'disable queueing a derive task')
    parser.add_argument('--clobber', dest = 'keepOldVersion', action = 'store_false', help = 'enable clobbering existing files')
    parser.add_argument('--ia-config-file', dest = 'iaConfigFile', metavar = 'FILE', help = 'path to the ia CLI config file (default: search the same paths as ia)')
    parser.add_argument('--tries', type = int, default = 3, metavar = 'N', help = 'retry on S3 errors (default: 3)')
    parser.add_argument('--concurrency', '--concurrent', type = int, default = 1, metavar = 'N', help = 'upload N parts in parallel (default: 1)')
    parser.add_argument('--no-complete', dest = 'complete', action = 'store_false', help = 'disable completing the upload when stdin is exhausted')
    parser.add_argument('--no-progress', dest = 'progress', action = 'store_false', help = 'disable progress bar')
    parser.add_argument('--upload-id', dest = 'uploadId', help = 'upload ID when resuming or aborting an upload')
    parser.add_argument('--parts', type = parts, help = 'previous parts data for resumption; can only be used with --upload-id')
    parser.add_argument('--abort', action = 'store_true', help = 'aborts an upload; can only be used with --upload-id; most other options are ignored when this is used')
    parser.add_argument('item', help = 'identifier of the target item')
    parser.add_argument('filename', help = 'filename to store the data to')
    parser.add_argument('metadata', nargs = '*', type = metadata, help = "metadata for the item in the form 'key:value'; only has an effect if the item doesn't exist yet")
    args = parser.parse_args()
    if (args.parts or args.abort) and not args.uploadId:
        parser.error('--parts and --abort can only be used together with --upload-id')
    if args.uploadId and (args.parts is not None) == bool(args.abort):
        parser.error('--upload-id requires exactly one of --parts and --abort')

    logging.basicConfig(level = logging.INFO, format = '{asctime}.{msecs:03.0f} {levelname} {name} {message}', datefmt = '%Y-%m-%d %H:%M:%S', style = '{')
    try:
        if not args.abort:
            asyncio.run(upload(
                args.item,
                args.filename,
                args.metadata,
                iaConfigFile = args.iaConfigFile,
                partSize = args.partSize,
                tries = args.tries,
                concurrency = args.concurrency,
                queueDerive = args.queueDerive,
                keepOldVersion = args.keepOldVersion,
                complete = args.complete,
                uploadId = args.uploadId,
                parts = args.parts,
                progress = args.progress,
            ))
        else:
            abort(
                args.item,
                args.filename,
                args.uploadId,
                iaConfigFile = args.iaConfigFile,
                tries = args.tries,
            )
    except (RuntimeError, UploadError) as e:
        if isinstance(e, PreventCompletionError):
            level = logging.INFO
        else:
            logger.exception('Unhandled exception raised')
            level = logging.WARNING
        if isinstance(e, UploadError):
            if e.r is not None:
                logger.info(pprint.pformat(vars(e.r.request)), exc_info = False)
                logger.info(pprint.pformat(vars(e.r)), exc_info = False)
            if e.uploadId:
                logger.log(level, f'Upload ID for resumption or abortion: {e.uploadId}', exc_info = False)
            if e.parts:
                parts = base64.b64encode(json.dumps(e.parts, separators = (',', ':')).encode('ascii')).decode('ascii')
                logger.log(level, f'Previous parts data for resumption: {parts}', exc_info = False)


if __name__ == '__main__':
    main()
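
# A minimal usage sketch (the script name, item identifier, filename, and metadata below are made-up
# placeholders; the data to upload is always read from stdin, so it has to be piped in):
#
#     some-producer | python3 this_script.py my-test-item data.tar title:'Test upload' --concurrency 2
#
# The coroutine can also be driven programmatically, under the same assumption that stdin carries the data:
#
#     import asyncio
#     asyncio.run(upload('my-test-item', 'data.tar', [('title', 'Test upload')], concurrency = 2))
#
# If an upload fails or --no-complete is used, the logged upload ID and parts string can be passed back
# via --upload-id and --parts to resume, or --upload-id together with --abort to abort the upload.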