|
- #!/usr/bin/env python3
- import asyncio
- import collections
- import http.client
- import json
- import re
- import shlex
- import sys
-
-
# Host serving the Wayback Machine CDX API.
HOST = 'web.archive.org'


def make_connection():
    """Create a new HTTPS connection to the CDX host with a 60 s socket timeout.

    The connection is lazy: no network traffic happens until a request is sent.
    """
    connection = http.client.HTTPSConnection(HOST, timeout = 60)
    return connection
-
-
def fetch(url, tries, connection):
    """GET *url* over *connection*, retrying up to *tries* times, and decode JSON.

    Returns a tuple ``(url, status, decoded, connection)``. The returned
    connection may differ from the one passed in: after any error the old
    connection is closed and replaced with a fresh one, since its state is
    no longer trustworthy. If every attempt fails, the last error is re-raised.
    """
    for attempt in range(tries):
        try:
            print(f'GET {url}', file = sys.stderr)
            connection.request('GET', url)
            response = connection.getresponse()
            status = response.status
            print(f'{status} {url}', file = sys.stderr)
            if status != 200:
                raise RuntimeError(f'Could not fetch {url}')
            body = response.read()
            print(f'Read {len(body)} bytes from {url}', file = sys.stderr)
            decoded = json.loads(body)
        except (RuntimeError, TimeoutError, http.client.HTTPException, json.JSONDecodeError) as e:
            print(f'Error retrieving {url}: {type(e).__module__}.{type(e).__name__} {e!s}', file = sys.stderr)
            # Discard the possibly-broken connection and start over with a new one.
            connection.close()
            connection = make_connection()
            if attempt == tries - 1:
                raise
        else:
            break
    return url, status, decoded, connection
-
-
async def wait_first_and_print(tasks):
    """Await the oldest task in the *tasks* deque and print its rows as JSONL.

    Each awaited task yields ``(url, status, payload, connection)`` where the
    payload's first row is the field names and the remaining rows are values.
    Returns ``(page, connection)`` so the caller can reuse the connection;
    returns ``None`` when *tasks* is empty.
    """
    if not tasks:
        return
    oldest = tasks.popleft()
    _url, _status, payload, connection = await oldest
    assert payload, 'got empty response'
    header = payload[0]
    rows = payload[1:]
    assert all(len(entry) == len(header) for entry in rows), 'got unexpected response format'
    for entry in rows:
        print(json.dumps(dict(zip(header, entry))))
    page = oldest._ia_cdx_page
    print(f'Completed processing page {page}', file = sys.stderr)
    return page, connection
-
-
async def main(query, concurrency = 1, tries = 1, startPage = None, numPages = None):
    """Run a paged CDX search for *query* and print one JSON object per CDX row.

    concurrency: number of pages fetched in parallel, one HTTPS connection each.
    tries: attempts per page before giving up.
    startPage/numPages: resume support; must be supplied together. When omitted,
    the total page count is discovered via the CDX API's showNumPages parameter.

    On a search-related failure (RuntimeError, JSON decode error, assertion), a
    resume command line is printed to stderr before the error propagates.
    """
    assert (startPage is None) == (numPages is None)
    connections = collections.deque()
    for i in range(concurrency):
        connections.append(make_connection())
    baseUrl = f'/cdx/search/cdx?{query}'
    if startPage is None:
        url = f'{baseUrl}&showNumPages=true'
        connection = connections.popleft()
        _, _, numPages, connection = fetch(url, tries, connection)
        numPages = int(numPages)
        connections.append(connection)
        startPage = 0
    print(f'{numPages} pages', file = sys.stderr)

    loop = asyncio.get_running_loop()
    tasks = collections.deque()
    # Bug fix: when resuming from startPage > 0, a crash before the first page
    # completes must make the resume hint point at startPage, not at page 0.
    lastGoodPage = startPage - 1
    try:
        try:
            for page in range(startPage, numPages):
                # Keep at most `concurrency` fetches in flight; drain the oldest
                # one first so output stays in page order.
                while len(tasks) >= concurrency:
                    lastGoodPage, connection = await wait_first_and_print(tasks)
                    connections.append(connection)
                url = f'{baseUrl}&output=json&page={page}'
                connection = connections.popleft()
                task = loop.run_in_executor(None, fetch, url, tries, connection)
                task._ia_cdx_page = page
                tasks.append(task)
            # All pages scheduled; drain whatever is still in flight.
            while len(tasks) > 0:
                lastGoodPage, connection = await wait_first_and_print(tasks)
                connections.append(connection)
        except BaseException:
            # It isn't possible to actually cancel a task running in a thread, so need to await them and discard any additional errors that occur.
            for task in tasks:
                try:
                    _, _, _, connection = await task
                except BaseException:
                    pass
                else:
                    connections.append(connection)
            for connection in connections:
                connection.close()
            raise
    except (RuntimeError, json.JSONDecodeError, AssertionError):
        concurrencyS = f'--concurrency {concurrency} ' if concurrency != 1 else ''
        triesS = f'--tries {tries} ' if tries != 1 else ''
        print(f'To resume this search from where it crashed, run: ia-cdx-search {concurrencyS}{triesS}--page {lastGoodPage + 1} --numpages {numPages} {shlex.quote(query)}', file = sys.stderr)
        raise
    except (BrokenPipeError, KeyboardInterrupt):
        pass
-
def usage():
    """Print usage information and examples to stderr, then exit with status 1."""
    print('Usage: ia-cdx-search [--concurrency N] [--tries N] [--page N --numpages N] QUERY', file = sys.stderr)
    print('Please refer to https://github.com/internetarchive/wayback/tree/master/wayback-cdx-server for the relevant query parameters', file = sys.stderr)
    print('The output, limit, resumeKey, showResumeKey, page, and showNumPages parameters must not be included.', file = sys.stderr)
    print('To resume a search that failed for some reason, provide the page number and number of pages through the second argument instead.', file = sys.stderr)
    # Typo fix: 'produces' -> 'produced'.
    print('Output is produced in JSONL format with one line per CDX entry.', file = sys.stderr)
    print('', file = sys.stderr)
    print('Examples:', file = sys.stderr)
    # Raw strings: '\.' is an invalid escape sequence in a regular string literal
    # (SyntaxWarning on Python >= 3.12); the printed text is unchanged.
    print(r" - Subdomains: ia-cdx-search 'url=example.org&collapse=urlkey&fl=original&matchType=domain&filter=original:^https?://[^/]*example\.org(?::[0-9]*)?/'", file = sys.stderr)
    print(' Note that this will only find subdomains whose homepages are in the Wayback Machine. To discover all known subdomains, remove the filter and then extract the domains from the results.', file = sys.stderr)
    # Typo fix: 'ia-cdex-search' -> 'ia-cdx-search'.
    print(r" - Subdirectories: ia-cdx-search 'url=example.org&collapse=urlkey&fl=original&matchType=domain&filter=original:^https?://[^/]*example\.org(?::[0-9]*)?/[^/]*/'", file = sys.stderr)
    print(' The same caveat applies. The directory must have been retrieved directly without an additional trailing path or query string.', file = sys.stderr)
    sys.exit(1)
-
-
# Command-line entry point: parse options, validate the query, run the search.
# Guarded so importing this module for its functions does not launch a search.
if __name__ == '__main__':
    args = sys.argv[1:]
    # Bug fix: avoid IndexError when invoked with no arguments at all.
    if not args or args[0].lower() in ('-h', '--help'):
        usage()
    kwargs = {}
    # Bug fix: guard against running out of arguments while consuming options
    # (e.g. a bare '--tries 3' previously raised IndexError here).
    while args and args[0].startswith('--'):
        if args[0] == '--concurrency':
            kwargs['concurrency'] = int(args[1])
            args = args[2:]
        elif args[0] == '--tries':
            kwargs['tries'] = int(args[1])
            args = args[2:]
        elif args[0] == '--page' and len(args) >= 4 and args[2].lower() == '--numpages':
            kwargs['startPage'] = int(args[1])
            kwargs['numPages'] = int(args[3])
            args = args[4:]
        else:
            break
    # Exactly one positional query, and it must not set reserved parameters.
    if len(args) != 1 or re.search(r'(^|&)(output|limit|resumekey|showresumekey|page|shownumpages)=', args[0], re.IGNORECASE):
        usage()
    query = args[0]
    asyncio.run(main(query, **kwargs))
|