The little things give you away... A collection of various small helper stuff
#!/usr/bin/env python3
import asyncio
import collections
import http.client
import json
import re
import shlex
import socket
import sys
import time


HOST = 'web.archive.org'
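
# One persistent HTTPS connection per worker; fetch() swaps a connection out for a fresh one whenever a request on it fails.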
def make_connection():
    return http.client.HTTPSConnection(HOST, timeout = 60)
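
# Fetch `url` with up to `tries` attempts and parse the response body as JSON.
# The CDX server signals rate limiting with a 302 redirect to /429.html rather than a plain 429, so that redirect is translated back into a 429 here; on a 429, sleep 30 s and retry on a fresh connection.
# Returns (url, status, parsed JSON, connection) so the caller can keep reusing the possibly replaced connection.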
def fetch(url, tries, connection):
    for i in range(tries):
        try:
            print(f'GET {url}', file = sys.stderr)
            connection.request('GET', url)
            r = connection.getresponse()
            status = r.status
            print(f'{status} {url}', file = sys.stderr)
            if status == 302 and r.getheader('Location') in ('https://web.archive.org/429.html', '/429.html'):
                # The CDX API is (was?) stupid and doesn't return 429s directly...
                status = 429
            if status == 429:
                print('Exceeded rate limit, waiting...', file = sys.stderr)
                time.sleep(30)
                raise RuntimeError(f'Rate-limited on {url}')
            if status != 200:
                raise RuntimeError(f'Could not fetch {url}')
            data = r.read()
            print(f'Read {len(data)} bytes from {url}', file = sys.stderr)
            o = json.loads(data)
            break
        except (RuntimeError, TimeoutError, socket.timeout, ConnectionError, http.client.HTTPException, json.JSONDecodeError) as e:
            # socket.timeout is an alias of TimeoutError from Python 3.10 but still needs to be caught explicitly for older versions
            print(f'Error retrieving {url}: {type(e).__module__}.{type(e).__name__} {e!s}', file = sys.stderr)
            connection.close()
            connection = make_connection()
            if i == tries - 1:
                raise
    return url, status, o, connection
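
# Await the oldest in-flight page fetch and emit its rows as JSONL.
# The CDX JSON output is a list of lists whose first row holds the field names; each subsequent row is zipped with it into one JSON object per output line.
# Returns the completed page number and the connection, which is free for reuse again.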
async def wait_first_and_print(tasks):
    if not tasks:
        return
    task = tasks.popleft()
    url, code, o, connection = await task
    if not o:
        print(f'Completed processing page {task._ia_cdx_page} (0 results)', file = sys.stderr)
        return task._ia_cdx_page, connection
    fields = o[0]
    assert all(len(v) == len(fields) for v in o[1:]), 'got unexpected response format'
    for row in o[1:]:
        print(json.dumps(dict(zip(fields, row))))
    print(f'Completed processing page {task._ia_cdx_page} ({len(o) - 1} results)', file = sys.stderr)
    return task._ia_cdx_page, connection
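
# Core driver: discover the number of result pages (unless resuming with startPage/numPages), then keep up to `concurrency` page fetches running in the default thread pool executor, draining completed pages in order.
# On a crash, print the exact command line needed to resume from the first unprinted page.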
async def main(query, concurrency = 1, tries = 1, startPage = None, numPages = None):
    assert (startPage is None) == (numPages is None)
    connections = collections.deque()
    for i in range(concurrency):
        connections.append(make_connection())
    baseUrl = f'/cdx/search/cdx?{query}'
    if startPage is None:
        url = f'{baseUrl}&showNumPages=true'
        connection = connections.popleft()
        _, _, numPages, connection = fetch(url, tries, connection)
        numPages = int(numPages)
        connections.append(connection)
        startPage = 0
        print(f'{numPages} pages', file = sys.stderr)
    loop = asyncio.get_running_loop()
    tasks = collections.deque()
    lastGoodPage = -1
    try:
        try:
            for page in range(startPage, numPages):
                while len(tasks) >= concurrency:
                    lastGoodPage, connection = await wait_first_and_print(tasks)
                    connections.append(connection)
                url = f'{baseUrl}&output=json&page={page}'
                connection = connections.popleft()
                task = loop.run_in_executor(None, fetch, url, tries, connection)
                task._ia_cdx_page = page
                tasks.append(task)
            while len(tasks) > 0:
                lastGoodPage, connection = await wait_first_and_print(tasks)
                connections.append(connection)
        except:
            # It isn't possible to actually cancel a task running in a thread, so need to await them and discard any additional errors that occur.
            for task in tasks:
                try:
                    _, _, _, connection = await task
                except:
                    pass
                else:
                    connections.append(connection)
            for connection in connections:
                connection.close()
            raise
    except (RuntimeError, json.JSONDecodeError, AssertionError):
        concurrencyS = f'--concurrency {concurrency} ' if concurrency != 1 else ''
        triesS = f'--tries {tries} ' if tries != 1 else ''
        print(f'To resume this search from where it crashed, run: ia-cdx-search {concurrencyS}{triesS}--page {lastGoodPage + 1} --numpages {numPages} {shlex.quote(query)}', file = sys.stderr)
        raise
    except (BrokenPipeError, KeyboardInterrupt):
        pass
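
# Help text; exits with a non-zero status so misuse is visible to calling scripts.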
def usage():
    print('Usage: ia-cdx-search [--concurrency N] [--tries N] [--page N --numpages N] QUERY', file = sys.stderr)
    print('Please refer to https://github.com/internetarchive/wayback/tree/master/wayback-cdx-server for the relevant query parameters', file = sys.stderr)
    print('The output, limit, resumeKey, showResumeKey, page, and showNumPages parameters must not be included.', file = sys.stderr)
    print('To resume a search that failed for some reason, provide the page number and the number of pages via the --page and --numpages options.', file = sys.stderr)
    print('Output is produced in JSONL format with one line per CDX entry.', file = sys.stderr)
    print('', file = sys.stderr)
    print('Examples:', file = sys.stderr)
    print(r" - Subdomains: ia-cdx-search 'url=example.org&collapse=urlkey&fl=original&matchType=domain&filter=original:^https?://[^/]*example\.org(?::[0-9]*)?/'", file = sys.stderr)
    print('   Note that this will only find subdomains whose homepages are in the Wayback Machine. To discover all known subdomains, remove the filter and then extract the domains from the results.', file = sys.stderr)
    print(r" - Subdirectories: ia-cdx-search 'url=example.org&collapse=urlkey&fl=original&matchType=domain&filter=original:^https?://[^/]*example\.org(?::[0-9]*)?/[^/]*/'", file = sys.stderr)
    print('   The same caveat applies. The directory must have been retrieved directly without an additional trailing path or query string.', file = sys.stderr)
    sys.exit(1)
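
# Hand-rolled argument parsing; options must precede the single QUERY argument, and the query must not contain the parameters managed by this script.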
args = sys.argv[1:]
if not args or args[0].lower() in ('-h', '--help'):
    usage()
kwargs = {}
# The `args and` / `len(args) >= 4` guards avoid an IndexError when an option is given without its value(s).
while args and args[0].startswith('--'):
    if args[0] == '--concurrency':
        kwargs['concurrency'] = int(args[1])
        args = args[2:]
    elif args[0] == '--tries':
        kwargs['tries'] = int(args[1])
        args = args[2:]
    elif args[0] == '--page' and len(args) >= 4 and args[2].lower() == '--numpages':
        kwargs['startPage'] = int(args[1])
        kwargs['numPages'] = int(args[3])
        args = args[4:]
    else:
        break
if len(args) != 1 or re.search(r'(^|&)(output|limit|resumekey|showresumekey|page|shownumpages)=', args[0], re.IGNORECASE):
    usage()
query = args[0]
asyncio.run(main(query, **kwargs))
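
# Example invocation (hypothetical query, output abridged): search example.org with two parallel page fetches
#   $ ia-cdx-search --concurrency 2 'url=example.org&matchType=domain&collapse=urlkey'
#   {"urlkey": "org,example)/", "timestamp": "20010424210312", "original": "http://example.org:80/", ...}
# The exact fields per line depend on the query's fl parameter (the CDX server's defaults include urlkey, timestamp, and original).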