The little things give you away... A collection of various small helper scripts.
#!/usr/bin/env python3
import asyncio
import collections
import http.client
import json
import re
import shlex
import sys
import time


HOST = 'web.archive.org'

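# One persistent HTTPS connection per concurrent fetch; broken connections
# are closed and recreated in fetch() below.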
def make_connection():
	return http.client.HTTPSConnection(HOST, timeout = 60)

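# Fetch url over the given connection, retrying up to `tries` times. The CDX
# API does not return 429s directly; it redirects to /429.html instead, so a
# 302 to that page is treated as a retryable rate-limit error after a 30 s
# pause. Returns (url, status, decoded JSON, connection); the connection is
# replaced with a fresh one whenever a request fails.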
def fetch(url, tries, connection):
	for i in range(tries):
		try:
			print(f'GET {url}', file = sys.stderr)
			connection.request('GET', url)
			r = connection.getresponse()
			status = r.status
			print(f'{status} {url}', file = sys.stderr)
			if status == 302 and r.getheader('Location') in ('https://web.archive.org/429.html', '/429.html'):
				# The CDX API is stupid and doesn't return 429s directly...
				print('Exceeded rate limit, waiting...', file = sys.stderr)
				time.sleep(30)
				raise RuntimeError(f'Rate-limited on {url}')
			if status != 200:
				raise RuntimeError(f'Could not fetch {url}')
			data = r.read()
			print(f'Read {len(data)} bytes from {url}', file = sys.stderr)
			o = json.loads(data)
			break
		except (RuntimeError, TimeoutError, http.client.HTTPException, json.JSONDecodeError) as e:
			print(f'Error retrieving {url}: {type(e).__module__}.{type(e).__name__} {e!s}', file = sys.stderr)
			connection.close()
			connection = make_connection()
			if i == tries - 1:
				raise
	return url, status, o, connection

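# Await the oldest in-flight page fetch and print its records as JSONL. The
# CDX JSON output is a list of rows whose first row holds the field names;
# each data row is zipped with those names into one JSON object per line.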
async def wait_first_and_print(tasks):
	if not tasks:
		return
	task = tasks.popleft()
	url, code, o, connection = await task
	assert o, 'got empty response'
	fields = o[0]
	assert all(len(v) == len(fields) for v in o[1:]), 'got unexpected response format'
	for row in o[1:]:
		print(json.dumps(dict(zip(fields, row))))
	print(f'Completed processing page {task._ia_cdx_page}', file = sys.stderr)
	return task._ia_cdx_page, connection

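# Discover the page count unless resuming via --page/--numpages, then keep up
# to `concurrency` page fetches running in the default thread pool executor,
# printing completed pages in order. On expected failures, a ready-to-run
# resume command is printed to stderr.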
async def main(query, concurrency = 1, tries = 1, startPage = None, numPages = None):
	assert (startPage is None) == (numPages is None)
	connections = collections.deque()
	for i in range(concurrency):
		connections.append(make_connection())
	baseUrl = f'/cdx/search/cdx?{query}'
	if startPage is None:
		url = f'{baseUrl}&showNumPages=true'
		connection = connections.popleft()
		_, _, numPages, connection = fetch(url, tries, connection)
		numPages = int(numPages)
		connections.append(connection)
		startPage = 0
		print(f'{numPages} pages', file = sys.stderr)
	loop = asyncio.get_running_loop()
	tasks = collections.deque()
	lastGoodPage = -1
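	# Nested try: the inner bare except drains the in-flight executor tasks
	# (threads cannot be cancelled) and closes all connections before
	# re-raising; the outer handlers print the resume hint for expected errors
	# and silently swallow broken pipes and Ctrl-C.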
	try:
		try:
			for page in range(startPage, numPages):
				while len(tasks) >= concurrency:
					lastGoodPage, connection = await wait_first_and_print(tasks)
					connections.append(connection)
				url = f'{baseUrl}&output=json&page={page}'
				connection = connections.popleft()
				task = loop.run_in_executor(None, fetch, url, tries, connection)
				task._ia_cdx_page = page
				tasks.append(task)
			while len(tasks) > 0:
				lastGoodPage, connection = await wait_first_and_print(tasks)
				connections.append(connection)
		except:
			# It isn't possible to actually cancel a task running in a thread, so need to await them and discard any additional errors that occur.
			for task in tasks:
				try:
					_, _, _, connection = await task
				except:
					pass
				else:
					connections.append(connection)
			for connection in connections:
				connection.close()
			raise
	except (RuntimeError, json.JSONDecodeError, AssertionError):
		concurrencyS = f'--concurrency {concurrency} ' if concurrency != 1 else ''
		triesS = f'--tries {tries} ' if tries != 1 else ''
		print(f'To resume this search from where it crashed, run: ia-cdx-search {concurrencyS}{triesS}--page {lastGoodPage + 1} --numpages {numPages} {shlex.quote(query)}', file = sys.stderr)
		raise
	except (BrokenPipeError, KeyboardInterrupt):
		pass

def usage():
	print('Usage: ia-cdx-search [--concurrency N] [--tries N] [--page N --numpages N] QUERY', file = sys.stderr)
	print('Please refer to https://github.com/internetarchive/wayback/tree/master/wayback-cdx-server for the relevant query parameters', file = sys.stderr)
	print('The output, limit, resumeKey, showResumeKey, page, and showNumPages parameters must not be included.', file = sys.stderr)
	print('To resume a search that failed for some reason, provide the page number and the number of pages through the --page and --numpages options instead.', file = sys.stderr)
	print('Output is produced in JSONL format with one line per CDX entry.', file = sys.stderr)
	print('', file = sys.stderr)
	print('Examples:', file = sys.stderr)
	print(r" - Subdomains: ia-cdx-search 'url=example.org&collapse=urlkey&fl=original&matchType=domain&filter=original:^https?://[^/]*example\.org(?::[0-9]*)?/'", file = sys.stderr)
	print('   Note that this will only find subdomains whose homepages are in the Wayback Machine. To discover all known subdomains, remove the filter and then extract the domains from the results.', file = sys.stderr)
	print(r" - Subdirectories: ia-cdx-search 'url=example.org&collapse=urlkey&fl=original&matchType=domain&filter=original:^https?://[^/]*example\.org(?::[0-9]*)?/[^/]*/'", file = sys.stderr)
	print('   The same caveat applies. The directory must have been retrieved directly without an additional trailing path or query string.', file = sys.stderr)
	sys.exit(1)

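# Hand-rolled CLI parsing: consume the --option value pairs from the front of
# argv, then require exactly one positional query string that does not set any
# of the parameters this script manages itself.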
args = sys.argv[1:]
if not args or args[0].lower() in ('-h', '--help'):
	usage()
kwargs = {}
while args and args[0].startswith('--'):
	if args[0] == '--concurrency':
		kwargs['concurrency'] = int(args[1])
		args = args[2:]
	elif args[0] == '--tries':
		kwargs['tries'] = int(args[1])
		args = args[2:]
	elif args[0] == '--page' and len(args) >= 4 and args[2].lower() == '--numpages':
		kwargs['startPage'] = int(args[1])
		kwargs['numPages'] = int(args[3])
		args = args[4:]
	else:
		break
if len(args) != 1 or re.search(r'(^|&)(output|limit|resumekey|showresumekey|page|shownumpages)=', args[0], re.IGNORECASE):
	usage()
query = args[0]
asyncio.run(main(query, **kwargs))
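# A typical invocation (the concrete query and output redirect are an
# illustrative assumption, not part of this script):
#   ia-cdx-search --concurrency 4 --tries 3 'url=example.org&matchType=domain&fl=original' > example.jsonl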