A VCS repository archival tool

import abc
#import codearchiver.modules # In get_module_class
import codearchiver.storage
import codearchiver.version
import collections
import contextlib
import dataclasses
import datetime
import functools
import logging
import os
import queue
import requests
import time
import typing
import weakref


_logger = logging.getLogger(__name__)


class InputURL:
    '''
    An input URL

    This primarily exists so multiple modules can access the content behind the URL for checks in `Module.matches` without fetching multiple times.
    It also handles the module name prefix in the scheme part of the URL. Note that `InputURL.url` is then the part without the module name.
    '''

    def __init__(self, url: str):
        if 0 < url.find('+') < url.find('://'):
            # '+' and '://' appear in the URL in this order and there is at least one character each before the + as well as between the two
            self._moduleScheme, self._url = url.split('+', 1)
        else:
            self._moduleScheme = None
            self._url = url
        self._response = None

    @property
    def url(self) -> str:
        '''URL without the module scheme prefix (if any)'''
        return self._url

    @property
    def moduleScheme(self) -> typing.Optional[str]:
        '''Module scheme prefix (if one is included, else `None`)'''
        return self._moduleScheme

    @property
    def content(self) -> str:
        '''HTTP response body upon fetching the URL with GET'''
        if self._response is None:
            self._response = HttpClient().get(self.url)
        return self._response.text

    def __repr__(self):
        return f'{type(self).__module__}.{type(self).__name__}({self._url!r})'
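
# Illustrative sketch (not part of the original module): how the module scheme prefix
# is split off. The URLs below are hypothetical.
#
#   inputUrl = InputURL('git+https://example.org/foo/bar.git')
#   inputUrl.moduleScheme  # 'git'
#   inputUrl.url           # 'https://example.org/foo/bar.git'
#
#   plainUrl = InputURL('https://example.org/foo/bar.git')
#   plainUrl.moduleScheme  # None
#   plainUrl.content       # fetched lazily via HttpClient().get(...) and cached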

@dataclasses.dataclass
class Result:
    '''Container for the result of a module'''

    id: str
    '''A unique ID for this result'''

    files: list[tuple[str, typing.Optional['Metadata']]] = dataclasses.field(default_factory = list)
    '''List of filenames produced by the run, optionally with metadata'''

    submoduleResults: list[tuple['Module', 'Result']] = dataclasses.field(default_factory = list)
    '''List of related submodules and their results'''


class MetadataValidationError(ValueError):
    pass


@dataclasses.dataclass
class MetadataField:
    key: str
    required: bool
    repeatable: bool
    indexed: bool = False


class Metadata(list[tuple[str, str]]):
    '''
    Metadata (key-value mapping, possibly with repeated keys) of a file produced by a module

    Fields are inherited. Subclasses meant to be usable should define their own version; the 'Metadata version' field is set by `Module.create_metadata` and collects all declared versions.
    '''

    fields: tuple[MetadataField] = (
        MetadataField('codearchiver version', required = True, repeatable = False),
        MetadataField('Module', required = True, repeatable = False, indexed = True),
        MetadataField('Metadata version', required = True, repeatable = False),
        MetadataField('ID', required = True, repeatable = False),
        MetadataField('Input URL', required = True, repeatable = False, indexed = True),
        MetadataField('Filename', required = True, repeatable = False),
        MetadataField('Retrieval start time', required = True, repeatable = False),
        MetadataField('Retrieval end time', required = True, repeatable = False),
    )
    '''The fields for this metadata collection'''

    version: int = 0
    '''Version, incremented on every backward-incompatible change'''

    # This cache needs to be different for each subclass.
    # The easiest way to achieve that is by mapping class objects to the corresponding cache.
    _allFieldsCache: dict[typing.Type['Metadata'], tuple[MetadataField]] = {}
    _subclassesByNameCache: dict[str, typing.Type['Metadata']] = {}

    def append(self, *args):
        if len(args) == 1:
            args = args[0]
        return super().append(args)

    # This should be a @classmethod, too, but that's deprecated since Python 3.11.
    @property
    def _allFields(self):
        '''All fields known by this metadata collection, own ones and all from superclasses'''
        cls = type(self)
        if cls not in cls._allFieldsCache:
            fields = []
            for cls_ in reversed(cls.mro()):
                fields.extend(getattr(cls_, 'fields', []))
            cls._allFieldsCache[cls] = tuple(fields)
        return cls._allFieldsCache[cls]

    @classmethod
    def _get_type_version_string(cls):
        if 'version' not in cls.__dict__:
            return None
        return f'{cls.__module__}.{cls.__qualname__}/{cls.version}'

    def validate(self):
        '''Check that all keys and values conform to the specification'''
        keyCounts = collections.Counter(key for key, _ in self)
        keys = set(keyCounts)
        permittedKeys = set(field.key for field in self._allFields)
        unrecognisedKeys = keys - permittedKeys
        requiredKeys = set(field.key for field in self._allFields if field.required)
        missingRequiredKeys = requiredKeys - keys
        repeatableKeys = set(field.key for field in self._allFields if field.repeatable)
        repeatedKeys = set(key for key, count in keyCounts.items() if count > 1)
        repeatedUnrepeatableKeys = repeatedKeys - repeatableKeys - unrecognisedKeys
        errors = []
        if unrecognisedKeys:
            errors.append(f'unrecognised key(s): {", ".join(sorted(unrecognisedKeys))}')
        if missingRequiredKeys:
            errors.append(f'missing required key(s): {", ".join(sorted(missingRequiredKeys))}')
        if repeatedUnrepeatableKeys:
            errors.append(f'repeated unrepeatable key(s): {", ".join(sorted(repeatedUnrepeatableKeys))}')
        if errors:
            raise MetadataValidationError('; '.join(errors))

    def matches(self, criteria: list[tuple[str, typing.Union[str, tuple[str]]]]) -> bool:
        '''
        Check whether the criteria match this metadata collection

        Each criterion consists of a key and one or more possible values. A criterion matches if at least one of the specified values is present in the metadata.
        Multiple criteria may use the same key to perform an AND search.
        The metadata is a match if all criteria match.
        '''
        criteria = criteria.copy()
        _logger.debug(f'Searching metadata for {criteria!r}')
        keysOfInterest = set(key for key, _ in criteria)
        for key, value in self:
            if key not in keysOfInterest:
                continue
            _logger.debug(f'Potentially interesting entry: {key!r} = {value!r}')
            matched = []  # Indices to remove from remaining criteria
            for i, (keyCriterion, valueCriterion) in enumerate(criteria):
                if keyCriterion != key:
                    continue
                if isinstance(valueCriterion, str) and valueCriterion == value:
                    _logger.debug('Str match')
                    matched.append(i)
                elif isinstance(valueCriterion, tuple) and value in valueCriterion:
                    _logger.debug('Tuple match')
                    matched.append(i)
            for i in reversed(matched):
                _logger.debug(f'Matched remaining criterion {i}: {criteria[i]}')
                del criteria[i]
            if not criteria:
                break
        _logger.debug(f'Remaining unmatched criteria: {criteria!r}')
        return not bool(criteria)
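
    # Illustrative sketch (not part of the original class), using hypothetical values:
    # a collection containing the entries ('Module', 'git') and ('Input URL', 'https://example.org/x')
    # matches the criteria list
    #   [('Module', ('git', 'hg')), ('Input URL', 'https://example.org/x')]
    # because every criterion is satisfied by at least one entry, but it does not match
    #   [('Module', 'hg')]
    # since no entry provides that value for 'Module'.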

    def serialise(self) -> str:
        '''Convert the metadata to a string suitable for e.g. a simple text file storage'''
        self.validate()
        return ''.join(f'{key}: {value}\n' for key, value in self)

    @classmethod
    def deserialise(cls, f: typing.Union[str, bytes, os.PathLike, typing.TextIO], *, validate = True):
        '''Import a serialised metadata from a filename or file-like object'''
        if isinstance(f, (str, bytes, os.PathLike)):
            cm = open(f, 'r')
        else:
            cm = contextlib.nullcontext(f)
        with cm as fp:
            o = cls((key, value[:-1]) for key, value in map(functools.partial(str.split, sep = ': ', maxsplit = 1), fp))
        # Extract the type and recreate with the correct Metadata subclass if necessary
        #TODO Implement a cleaner way of doing this than parsing it out of the 'Metadata version' field
        metaVersion = next((value for key, value in o if key == 'Metadata version'), None)
        if not metaVersion:
            raise MetadataValidationError('missing metadata version')
        #TODO Support for different metadata versions in case I need to bump it for backwards-incompatible changes since older files may still need to be read
        metaTypeVersionString = metaVersion.rsplit(' ', 1)[-1]
        if metaTypeVersionString not in cls._subclassesByNameCache:
            q = collections.deque()
            q.append(Metadata)
            while q:
                c = q.popleft()
                if (cts := c._get_type_version_string()):
                    cls._subclassesByNameCache[cts] = c
                q.extend(c.__subclasses__())
        # Fail with a clear error if the referenced Metadata subclass is not loaded instead of crashing below.
        metaType = cls._subclassesByNameCache.get(metaTypeVersionString)
        if metaType is None:
            raise MetadataValidationError(f'unknown metadata type/version: {metaTypeVersionString!r}')
        if metaType is not cls:
            o = metaType(o)
        if validate:
            o.validate()
        return o

    @property
    def indexedFields(self) -> typing.Iterator[str]:
        '''Yield fields known to this metadata collection that should be indexed'''
        yield from (field.key for field in self._allFields if field.indexed)

    def iter_indexed(self) -> typing.Iterator[tuple[str, str]]:
        '''Iterate over the metadata and return all indexed fields as key-value pairs'''
        indexedFields = set(self.indexedFields)
        yield from ((key, value) for key, value in self if key in indexedFields)
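
# Illustrative sketch (not part of the original module): declaring a module-specific
# Metadata subclass and round-tripping it. All names and values below are hypothetical.
#
#   class ExampleMetadata(Metadata):
#       fields = (
#           MetadataField('Example commit', required = False, repeatable = True),
#       )
#       version = 0
#
#   meta = ExampleMetadata()
#   meta.append('Example commit', 'abc123')    # two-argument form of the append override
#   meta.append(('Example commit', 'def456'))  # plain tuple form works as well
#   # meta.serialise() would raise MetadataValidationError at this point because the
#   # required base fields ('Module', 'ID', ...) are missing; Module.create_metadata
#   # normally fills those in before serialisation.
#   restored = Metadata.deserialise('example.meta.txt', validate = False)
#   # deserialise() picks the correct subclass from the file's 'Metadata version' field.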

class HttpError(Exception):
    '''An HTTP request failed too many times.'''


class HttpClient:
    '''A thin HTTP client wrapper around Requests with exponential-backoff retries and a default user agent for all requests.'''

    defaultRetries: int = 3
    '''Default number of retries on errors unless overridden when creating the HttpClient object'''

    defaultUserAgent: str = f'codearchiver/{codearchiver.version.__version__}'
    '''Default user agent unless overridden on instantiation or via the `headers` kwarg'''

    def __init__(self, retries: typing.Optional[int] = None, userAgent: typing.Optional[str] = None):
        self._session = requests.Session()
        self._retries = retries if retries is not None else self.defaultRetries  # compare against None so that `retries = 0` (no retries) is respected
        self._userAgent = userAgent if userAgent else self.defaultUserAgent

    def request(self,
                method,
                url,
                params = None,
                data = None,
                headers: typing.Optional[dict[str, str]] = None,
                timeout: int = 10,
                responseOkCallback: typing.Optional[typing.Callable[[requests.Response], tuple[bool, typing.Optional[str]]]] = None,
               ) -> requests.Response:
        '''
        Make an HTTP request

        For the details on `method`, `url`, `params`, and `data`, refer to the Requests documentation on the constructor of `requests.Request`.
        For details on `timeout`, see `requests.adapters.HTTPAdapter.send`.
        `headers` can be used to specify any HTTP headers. Note that this is case-sensitive. To override the user agent, include a value for the `User-Agent` key here.
        `responseOkCallback` can be used to control whether a response is considered acceptable or not. By default, all HTTP responses are considered fine. If specified, this callable must produce a boolean marking whether the response is successful and an error message string. The string is used for logging purposes when the success flag is `False`; it should be `None` if the first return value is `True`.
        '''
        mergedHeaders = {'User-Agent': self._userAgent}
        if headers:
            mergedHeaders.update(headers)
        headers = mergedHeaders
        for attempt in range(self._retries + 1):
            # The request is newly prepared on each retry because of potential cookie updates.
            req = self._session.prepare_request(requests.Request(method, url, params = params, data = data, headers = headers))
            _logger.info(f'Retrieving {req.url}')
            _logger.debug(f'... with headers: {headers!r}')
            if data:
                _logger.debug(f'... with data: {data!r}')
            try:
                r = self._session.send(req, timeout = timeout)
            except requests.exceptions.RequestException as exc:
                if attempt < self._retries:
                    retrying = ', retrying'
                    level = logging.WARNING
                else:
                    retrying = ''
                    level = logging.ERROR
                _logger.log(level, f'Error retrieving {req.url}: {exc!r}{retrying}')
            else:
                if responseOkCallback is not None:
                    success, msg = responseOkCallback(r)
                else:
                    success, msg = (True, None)
                msg = f': {msg}' if msg else ''
                if success:
                    _logger.debug(f'{req.url} retrieved successfully{msg}')
                    return r
                else:
                    if attempt < self._retries:
                        retrying = ', retrying'
                        level = logging.WARNING
                    else:
                        retrying = ''
                        level = logging.ERROR
                    _logger.log(level, f'Error retrieving {req.url}{msg}{retrying}')
            if attempt < self._retries:
                sleepTime = 1.0 * 2 ** attempt  # exponential backoff: sleep 1 second after first attempt, 2 after second, 4 after third, etc.
                _logger.info(f'Waiting {sleepTime:.0f} seconds')
                time.sleep(sleepTime)
            else:
                msg = f'{self._retries + 1} requests to {req.url} failed, giving up.'
                _logger.fatal(msg)
                raise HttpError(msg)
        raise RuntimeError('Reached unreachable code')

    def get(self, *args, **kwargs):
        '''Make a GET request. This is equivalent to calling `.request('GET', ...)`.'''
        return self.request('GET', *args, **kwargs)

    def post(self, *args, **kwargs):
        '''Make a POST request. This is equivalent to calling `.request('POST', ...)`.'''
        return self.request('POST', *args, **kwargs)
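
# Illustrative sketch (not part of the original module): using the client with a
# response check. The URL and the 404 rule below are hypothetical.
#
#   def ok_unless_404(response):
#       if response.status_code == 404:
#           return False, f'status {response.status_code}'
#       return True, None
#
#   client = HttpClient(retries = 2, userAgent = 'my-archiver/0.1')
#   r = client.get('https://example.org/api', responseOkCallback = ok_unless_404)
#   # Raises HttpError after three failed attempts (initial try + 2 retries),
#   # sleeping 1 s and then 2 s between attempts.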

class ModuleMeta(abc.ABCMeta):
    '''Metaclass of modules. This is used to keep track of which modules exist and to select them. It also enforces module name restrictions and prevents name collisions.'''

    __modulesByName: dict[str, typing.Type['Module']] = {}

    def __new__(cls, *args, **kwargs):
        class_ = super().__new__(cls, *args, **kwargs)
        if class_.name is not None:
            if class_.name.strip('abcdefghijklmnopqrstuvwxyz-') != '':
                raise RuntimeError(f'Invalid class name: {class_.name!r}')
            if class_.name in cls.__modulesByName:
                raise RuntimeError(f'Class name collision: {class_.name!r} is already known')
            cls.__modulesByName[class_.name] = weakref.ref(class_)
            _logger.info(f'Found {class_.name!r} module {class_.__module__}.{class_.__name__}')
        else:
            _logger.info(f'Found nameless module {class_.__module__}.{class_.__name__}')
        return class_

    @classmethod
    def get_module_by_name(cls, name: str) -> typing.Optional[typing.Type['Module']]:
        '''Get a module by name if one exists'''
        if classRef := cls.__modulesByName.get(name):
            class_ = classRef()
            if class_ is None:
                _logger.info(f'Module {name!r} is gone, dropping')
                del cls.__modulesByName[name]
            return class_

    @classmethod
    def iter_modules(cls) -> typing.Iterator[typing.Type['Module']]:
        '''Iterate over all known modules'''
        # Housekeeping first: remove dead modules
        for name in list(cls.__modulesByName):  # create a copy of the names list so the dict can be modified in the loop
            if cls.__modulesByName[name]() is None:
                _logger.info(f'Module {name!r} is gone, dropping')
                del cls.__modulesByName[name]
        for name, classRef in cls.__modulesByName.items():
            class_ = classRef()
            if class_ is None:
                # Module class no longer exists, skip
                # Even though dead modules are removed above, it's possible that the code consuming this iterator drops/deletes modules.
                continue
            yield class_

    @classmethod
    def drop(cls, module: 'Module'):
        '''
        Remove a module from the list of known modules

        If a Module subclass is destroyed after `del MyModule`, it is also eventually removed from the list. However, as that relies on garbage collection, it should not be depended on, and modules should be dropped with this method explicitly.
        '''
        if module.name is not None and module.name in cls.__modulesByName:
            del cls.__modulesByName[module.name]
            _logger.info(f'Module {module.name!r} dropped')

    def __del__(self, *args, **kwargs):
        if self.name is not None and self.name in type(self).__modulesByName:
            _logger.info(f'Module {self.name!r} is being destroyed, dropping')
            del type(self).__modulesByName[self.name]
        # type has no __del__ method, no need to call it.
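
# Illustrative sketch (not part of the original module): looking up registered modules
# through the metaclass. The 'git' name is hypothetical and only resolves if a module
# with that name has been imported.
#
#   gitModule = ModuleMeta.get_module_by_name('git')    # a Module subclass, or None
#   for moduleClass in ModuleMeta.iter_modules():
#       print(moduleClass.name)
#   ModuleMeta.drop(gitModule)                          # explicit removal from the registry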

class Module(metaclass = ModuleMeta):
    '''An abstract base class for a module.'''

    name: typing.Optional[str] = None
    '''The name of the module. Modules without a name are ignored. Names must be unique and may only contain a-z and hyphens.'''

    MetadataClass: typing.Optional[typing.Type[Metadata]] = None
    '''The Metadata class corresponding to this module, if any.'''

    @staticmethod
    def matches(inputUrl: InputURL) -> bool:
        '''Whether or not this module is for handling `inputUrl`.'''
        return False

    def __init__(self, inputUrl: InputURL, storage: typing.Optional[codearchiver.storage.Storage] = None, id_: typing.Optional[str] = None):
        self._inputUrl = inputUrl
        self._url = inputUrl.url
        self._storage = storage
        self._id = id_
        if self._id is None and type(self).name is not None:
            mangledUrl = self._url.replace('/', '_').replace('?', '_').replace('&', '_').replace('#', '_')
            self._id = f'{type(self).name}_{mangledUrl}_{datetime.datetime.utcnow():%Y%m%dT%H%M%SZ}'
        self._httpClient = HttpClient()

    @abc.abstractmethod
    def process(self) -> Result:
        '''Perform the relevant retrieval(s)'''

    def create_metadata(self, filename: str, startTime: datetime.datetime, endTime: datetime.datetime) -> Metadata:
        '''
        Create a basic Metadata instance appropriate for this module

        `startTime` and `endTime` must be in UTC (e.g. `datetime.datetime.utcnow()`). They should reflect the moments just before and after all interaction with the remote system.
        '''
        if type(self).MetadataClass is None or type(self).name is None:
            raise RuntimeError('Module lacks a MetadataClass or a name; cannot create metadata')
        idx = type(self).MetadataClass()
        idx.append('codearchiver version', codearchiver.version.__version__)
        idx.append('Module', type(self).name)
        metadataVersions = []
        for cls in reversed(type(self).MetadataClass.mro()):
            if (f := getattr(cls, '_get_type_version_string', None)) and (version := f()):
                metadataVersions.append(version)
        idx.append('Metadata version', ' '.join(metadataVersions))
        idx.append('ID', self._id)
        idx.append('Input URL', self._url)
        idx.append('Filename', filename)
        idx.append('Retrieval start time', startTime.strftime('%Y-%m-%d %H:%M:%S.%f UTC'))
        idx.append('Retrieval end time', endTime.strftime('%Y-%m-%d %H:%M:%S.%f UTC'))
        return idx

    def __repr__(self):
        return f'{type(self).__module__}.{type(self).__name__}({self._inputUrl!r})'
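
# Illustrative sketch (not part of the original module): the shape of a minimal concrete
# module. The name, URL check, and metadata class are hypothetical; real modules live in
# codearchiver.modules. Defining such a class registers it with ModuleMeta automatically
# via the metaclass.
#
#   class ExampleModuleMetadata(Metadata):
#       version = 0
#
#   class ExampleModule(Module):
#       name = 'example'
#       MetadataClass = ExampleModuleMetadata
#
#       @staticmethod
#       def matches(inputUrl):
#           return inputUrl.url.endswith('.example')
#
#       def process(self):
#           startTime = datetime.datetime.utcnow()
#           # ... retrieve data from self._url using self._httpClient ...
#           endTime = datetime.datetime.utcnow()
#           metadata = self.create_metadata('example-output.bin', startTime, endTime)
#           return Result(id = self._id, files = [('example-output.bin', metadata)])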

def get_module_class(inputUrl: InputURL) -> typing.Type[Module]:
    '''Get the Module class most suitable for handling `inputUrl`.'''
    # Ensure that modules are imported
    # This can't be done at the top because the modules need to refer back to the Module class.
    import codearchiver.modules

    # Check if the URL references one of the modules directly
    if inputUrl.moduleScheme:
        if module := ModuleMeta.get_module_by_name(inputUrl.moduleScheme):
            _logger.info(f'Selecting module {module.__module__}.{module.__name__}')
            return module
        else:
            raise RuntimeError(f'No module with name {inputUrl.moduleScheme!r} exists')

    # Check if exactly one of the modules matches
    matches = [class_ for class_ in ModuleMeta.iter_modules() if class_.matches(inputUrl)]
    if len(matches) >= 2:
        _logger.error('Multiple matching modules for input URL')
        _logger.debug(f'Matching modules: {matches!r}')
        raise RuntimeError('Multiple matching modules for input URL')
    if matches:
        _logger.info(f'Selecting module {matches[0].__module__}.{matches[0].__name__}')
        return matches[0]
    raise RuntimeError('No matching modules for input URL')


def get_module_instance(inputUrl: InputURL, **kwargs) -> Module:
    '''Get an instance of the Module class most suitable for handling `inputUrl`.'''
    return get_module_class(inputUrl)(inputUrl, **kwargs)
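
# Illustrative sketch (not part of the original module): the typical end-to-end flow.
# The URL and storage setup are hypothetical.
#
#   inputUrl = InputURL('https://example.org/foo/bar.git')
#   moduleInstance = get_module_instance(inputUrl, storage = None)
#   result = moduleInstance.process()
#   for filename, metadata in result.files:
#       print(filename, metadata.serialise() if metadata else None)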