A VCS repository archival tool
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

401 lines
14 KiB

  1. import abc
  2. #import codearchiver.modules # In get_module_class
  3. import codearchiver.storage
  4. import codearchiver.version
  5. import collections
  6. import contextlib
  7. import dataclasses
  8. import functools
  9. import logging
  10. import os
  11. import queue
  12. import requests
  13. import time
  14. import typing
  15. import weakref
  16. _logger = logging.getLogger(__name__)
  17. class InputURL:
  18. '''
  19. An input URL
  20. This primarily exists so multiple modules can access the content behind the URL for checks in `Module.matches` without fetching multiple times.
  21. It also handles the module name prefix in the scheme part of the URL. Note that `InputURL.url` is then the part without the module name.
  22. '''
  23. def __init__(self, url: str):
  24. if 0 < url.find('+') < url.find('://'):
  25. # '+' and '://' appear in the URL in this order and there is at least one character each before the + as well as between the two
  26. self._moduleScheme, self._url = url.split('+', 1)
  27. else:
  28. self._moduleScheme = None
  29. self._url = url
  30. self._response = None
  31. @property
  32. def url(self) -> str:
  33. '''URL without the module scheme prefix (if any)'''
  34. return self._url
  35. @property
  36. def moduleScheme(self) -> typing.Optional[str]:
  37. '''Module scheme prefix (if one is included, else `None`)'''
  38. return self._moduleScheme
  39. @property
  40. def content(self) -> str:
  41. '''HTTP response body upon fetching the URL with GET'''
  42. if self._response is None:
  43. self._response = HttpClient().get(self.url)
  44. return self._response.text
  45. def __repr__(self):
  46. return f'{type(self).__module__}.{type(self).__name__}({self._url!r})'
  47. @dataclasses.dataclass
  48. class Result:
  49. '''Container for the result of a module'''
  50. id: str
  51. '''A unique ID for this result'''
  52. files: list[tuple[str, typing.Optional['Index']]] = dataclasses.field(default_factory = list)
  53. '''List of filenames produced by the run, optionally with an index'''
  54. submoduleResults: list[tuple['Module', 'Result']] = dataclasses.field(default_factory = list)
  55. '''List of related submodules and their results'''
  56. class IndexValidationError(ValueError):
  57. pass
  58. @dataclasses.dataclass
  59. class IndexField:
  60. key: str
  61. required: bool
  62. repeatable: bool
  63. class Index(list[tuple[str, str]]):
  64. '''An index (key-value mapping, possibly with repeated keys) of a file produced by a module'''
  65. fields: tuple[IndexField] = ()
  66. '''The fields for this index'''
  67. _allFieldsCache: typing.Optional[tuple[IndexField]] = None
  68. def append(self, *args):
  69. if len(args) == 1:
  70. args = args[0]
  71. return super().append(args)
  72. # This should be a @classmethod, too, but that's deprecated since Python 3.11.
  73. @property
  74. def _allFields(self):
  75. '''All fields known by this index, own ones and all from superclasses'''
  76. if type(self)._allFieldsCache is None:
  77. fields = []
  78. for cls in reversed(type(self).mro()):
  79. fields.extend(getattr(cls, 'fields', []))
  80. type(self)._allFieldsCache = tuple(fields)
  81. return type(self)._allFieldsCache
  82. def validate(self):
  83. '''Check that all keys and values in the index conform to the specification'''
  84. keyCounts = collections.Counter(key for key, _ in self)
  85. keys = set(keyCounts)
  86. permittedKeys = set(field.key for field in self._allFields)
  87. unrecognisedKeys = keys - permittedKeys
  88. if unrecognisedKeys:
  89. raise IndexValidationError(f'Unrecognised key(s): {", ".join(sorted(unrecognisedKeys))}')
  90. requiredKeys = set(field.key for field in self._allFields if field.required)
  91. missingRequiredKeys = requiredKeys - keys
  92. if missingRequiredKeys:
  93. raise IndexValidationError(f'Missing required key(s): {", ".join(sorted(missingRequiredKeys))}')
  94. repeatableKeys = set(field.key for field in self._allFields if field.repeatable)
  95. repeatedKeys = set(key for key, count in keyCounts.items() if count > 1)
  96. repeatedUnrepeatableKeys = repeatedKeys - repeatableKeys
  97. if repeatedUnrepeatableKeys:
  98. raise IndexValidationError(f'Repeated unrepeatable key(s): {", ".join(sorted(repeatedUnrepeatableKeys))}')
  99. def matches(self, criteria: list[tuple[str, typing.Union[str, tuple[str]]]]) -> bool:
  100. '''
  101. Check whether the criteria match this index
  102. Each criterion consists of a key and one or more possible values. A criterion matches if at least one of the specified values is present in the index.
  103. Multiple criteria may use the same key to perform an AND search.
  104. The index is a match if all criteria match.
  105. '''
  106. criteria = criteria.copy()
  107. _logger.debug(f'Searching index for {criteria!r}')
  108. keysOfInterest = set(key for key, _ in criteria)
  109. for key, value in self:
  110. if key not in keysOfInterest:
  111. continue
  112. _logger.debug(f'Potentially interesting entry: {key!r} = {value!r}')
  113. matched = [] # Indices to remove from remaining criteria
  114. for i, (keyCriterion, valueCriterion) in enumerate(criteria):
  115. if keyCriterion != key:
  116. continue
  117. if isinstance(valueCriterion, str) and valueCriterion == value:
  118. _logger.debug('Str match')
  119. matched.append(i)
  120. elif isinstance(valueCriterion, tuple) and value in valueCriterion:
  121. _logger.debug('Tuple match')
  122. matched.append(i)
  123. for i in reversed(matched):
  124. _logger.debug(f'Matched remaining criterion {i}: {criteria[i]}')
  125. del criteria[i]
  126. if not criteria:
  127. break
  128. _logger.debug(f'Remaining unmatched criteria: {criteria!r}')
  129. return not bool(criteria)
  130. def serialise(self) -> str:
  131. '''Convert the index to a string suitable for e.g. a simple text file storage'''
  132. self.validate()
  133. return ''.join(f'{key}: {value}\n' for key, value in self)
  134. @classmethod
  135. def deserialise(cls, f: typing.Union[str, bytes, os.PathLike, typing.TextIO], *, validate = True):
  136. '''Import a serialised index from a filename or file-like object'''
  137. if isinstance(f, (str, bytes, os.PathLike)):
  138. cm = open(f, 'r')
  139. else:
  140. cm = contextlib.nullcontext(f)
  141. with cm as fp:
  142. o = cls((key, value[:-1]) for key, value in map(functools.partial(str.split, sep = ': '), fp))
  143. if validate:
  144. o.validate()
  145. return o
  146. class HttpError(Exception):
  147. '''An HTTP request failed too many times.'''
  148. class HttpClient:
  149. '''A thin wrapper HTTP client around Requests with exponential backoff retries and a default user agent for all requests.'''
  150. defaultRetries: int = 3
  151. '''Default number of retries on errors unless overridden on creating the HttpClient object'''
  152. defaultUserAgent: str = f'codearchiver/{codearchiver.version.__version__}'
  153. '''Default user agent unless overridden on instantiation or by overriding via the headers kwarg'''
  154. def __init__(self, retries: typing.Optional[int] = None, userAgent: typing.Optional[str] = None):
  155. self._session = requests.Session()
  156. self._retries = retries if retries else self.defaultRetries
  157. self._userAgent = userAgent if userAgent else self.defaultUserAgent
  158. def request(self,
  159. method,
  160. url,
  161. params = None,
  162. data = None,
  163. headers: typing.Optional[dict[str, str]] = None,
  164. timeout: int = 10,
  165. responseOkCallback: typing.Optional[typing.Callable[[requests.Response], tuple[bool, typing.Optional[str]]]] = None,
  166. ) -> requests.Response:
  167. '''
  168. Make an HTTP request
  169. For the details on `method`, `url`, `params`, and `data`, refer to the Requests documentation on the constructor of `requests.Request`.
  170. For details on `timeout`, see `requests.adapters.HTTPAdapter.send`.
  171. `headers` can be used to specify any HTTP headers. Note that this is case-sensitive. To override the user agent, include a value for the `User-Agent` key here.
  172. `responseOkCallback` can be used to control whether a response is considered acceptable or not. By default, all HTTP responses are considered fine. If specified, this callable must produce a boolean marking whether the response is successful and an error message string. The string is used for logging purposes when the success flag is `False`; it should be `None` if the first return value is `True`.
  173. '''
  174. mergedHeaders = {'User-Agent': self._userAgent}
  175. if headers:
  176. mergedHeaders.update(headers)
  177. headers = mergedHeaders
  178. for attempt in range(self._retries + 1):
  179. # The request is newly prepared on each retry because of potential cookie updates.
  180. req = self._session.prepare_request(requests.Request(method, url, params = params, data = data, headers = headers))
  181. _logger.info(f'Retrieving {req.url}')
  182. _logger.debug(f'... with headers: {headers!r}')
  183. if data:
  184. _logger.debug(f'... with data: {data!r}')
  185. try:
  186. r = self._session.send(req, timeout = timeout)
  187. except requests.exceptions.RequestException as exc:
  188. if attempt < self._retries:
  189. retrying = ', retrying'
  190. level = logging.WARNING
  191. else:
  192. retrying = ''
  193. level = logging.ERROR
  194. _logger.log(level, f'Error retrieving {req.url}: {exc!r}{retrying}')
  195. else:
  196. if responseOkCallback is not None:
  197. success, msg = responseOkCallback(r)
  198. else:
  199. success, msg = (True, None)
  200. msg = f': {msg}' if msg else ''
  201. if success:
  202. _logger.debug(f'{req.url} retrieved successfully{msg}')
  203. return r
  204. else:
  205. if attempt < self._retries:
  206. retrying = ', retrying'
  207. level = logging.WARNING
  208. else:
  209. retrying = ''
  210. level = logging.ERROR
  211. _logger.log(level, f'Error retrieving {req.url}{msg}{retrying}')
  212. if attempt < self._retries:
  213. sleepTime = 1.0 * 2**attempt # exponential backoff: sleep 1 second after first attempt, 2 after second, 4 after third, etc.
  214. _logger.info(f'Waiting {sleepTime:.0f} seconds')
  215. time.sleep(sleepTime)
  216. else:
  217. msg = f'{self._retries + 1} requests to {req.url} failed, giving up.'
  218. _logger.fatal(msg)
  219. raise HttpError(msg)
  220. raise RuntimeError('Reached unreachable code')
  221. def get(self, *args, **kwargs):
  222. '''Make a GET request. This is equivalent to calling `.request('GET', ...)`.'''
  223. return self.request('GET', *args, **kwargs)
  224. def post(self, *args, **kwargs):
  225. '''Make a POST request. This is equivalent to calling `.request('POST', ...)`.'''
  226. return self.request('POST', *args, **kwargs)
class ModuleMeta(type):
	'''Metaclass of modules. This is used to keep track of which modules exist and selecting them. It also enforces module name restrictions and prevents name collisions.'''

	# Registry mapping a module name to a *weak* reference to its class, so the
	# registry itself never keeps a module class alive; stale entries are cleaned
	# up lazily in get_module_by_name/iter_modules and in __del__.
	__modulesByName: dict[str, typing.Type['Module']] = {}

	def __new__(cls, *args, **kwargs):
		# Create the class object first, then register it if it carries a name.
		class_ = super().__new__(cls, *args, **kwargs)
		if class_.name is not None:
			# Stripping every permitted character must leave an empty string,
			# i.e. names may only consist of a-z and hyphens.
			if class_.name.strip('abcdefghijklmnopqrstuvwxyz-') != '':
				raise RuntimeError(f'Invalid class name: {class_.name!r}')
			if class_.name in cls.__modulesByName:
				raise RuntimeError(f'Class name collision: {class_.name!r} is already known')
			cls.__modulesByName[class_.name] = weakref.ref(class_)
			_logger.info(f'Found {class_.name!r} module {class_.__module__}.{class_.__name__}')
		else:
			_logger.info(f'Found nameless module {class_.__module__}.{class_.__name__}')
		return class_

	@classmethod
	def get_module_by_name(cls, name: str) -> typing.Optional[typing.Type['Module']]:
		'''Get a module by name if one exists'''
		if classRef := cls.__modulesByName.get(name):
			class_ = classRef()
			if class_ is None:
				# The weakly referenced class has been destroyed; drop the stale entry.
				_logger.info(f'Module {name!r} is gone, dropping')
				del cls.__modulesByName[name]
			return class_
		# Falls through to an implicit None when no module of that name is registered.

	@classmethod
	def iter_modules(cls) -> typing.Iterator[typing.Type['Module']]:
		'''Iterate over all known modules'''
		# Housekeeping first: remove dead modules
		for name in list(cls.__modulesByName): # create a copy of the names list so the dict can be modified in the loop
			if cls.__modulesByName[name]() is None:
				_logger.info(f'Module {name!r} is gone, dropping')
				del cls.__modulesByName[name]
		for name, classRef in cls.__modulesByName.items():
			# Dereference the weakref; yields the class or None if it died meanwhile.
			class_ = classRef()
			if class_ is None:
				# Module class no longer exists, skip
				# Even though dead modules are removed above, it's possible that the code consuming this iterator drops/deletes modules.
				continue
			yield class_

	@classmethod
	def drop(cls, module: 'Module'):
		'''
		Remove a module from the list of known modules

		If a Module subclass is destroyed after `del MyModule`, it is also eventually removed from the list. However, as that relies on garbage collection, it should not be depended on and modules should be dropped with this method explicitly.
		'''
		if module.name is not None and module.name in cls.__modulesByName:
			del cls.__modulesByName[module.name]
			_logger.info(f'Module {module.name!r} dropped')

	def __del__(self, *args, **kwargs):
		# `self` here is a module *class* (an instance of this metaclass) that is being
		# destroyed; remove it from the registry if it is still recorded there.
		if self.name is not None and self.name in type(self).__modulesByName:
			_logger.info(f'Module {self.name!r} is being destroyed, dropping')
			del type(self).__modulesByName[self.name]
		# type has no __del__ method, no need to call it.
  280. class Module(metaclass = ModuleMeta):
  281. '''An abstract base class for a module.'''
  282. name: typing.Optional[str] = None
  283. '''The name of the module. Modules without a name are ignored. Names must be unique and may only contain a-z and hyphens.'''
  284. @staticmethod
  285. def matches(inputUrl: InputURL) -> bool:
  286. '''Whether or not this module is for handling `inputUrl`.'''
  287. return False
  288. def __init__(self, inputUrl: InputURL, storage: typing.Optional[codearchiver.storage.Storage] = None, id_: typing.Optional[str] = None):
  289. self._inputUrl = inputUrl
  290. self._url = inputUrl.url
  291. self._storage = storage
  292. self._id = id_
  293. self._httpClient = HttpClient()
  294. @abc.abstractmethod
  295. def process(self) -> Result:
  296. '''Perform the relevant retrieval(s)'''
  297. def __repr__(self):
  298. return f'{type(self).__module__}.{type(self).__name__}({self._inputUrl!r})'
  299. def get_module_class(inputUrl: InputURL) -> typing.Type[Module]:
  300. '''Get the Module class most suitable for handling `inputUrl`.'''
  301. # Ensure that modules are imported
  302. # This can't be done at the top because the modules need to refer back to the Module class.
  303. import codearchiver.modules
  304. # Check if the URL references one of the modules directly
  305. if inputUrl.moduleScheme:
  306. if module := ModuleMeta.get_module_by_name(inputUrl.moduleScheme):
  307. _logger.info(f'Selecting module {module.__module__}.{module.__name__}')
  308. return module
  309. else:
  310. raise RuntimeError(f'No module with name {inputUrl.moduleScheme!r} exists')
  311. # Check if exactly one of the modules matches
  312. matches = [class_ for class_ in ModuleMeta.iter_modules() if class_.matches(inputUrl)]
  313. if len(matches) >= 2:
  314. _logger.error('Multiple matching modules for input URL')
  315. _logger.debug(f'Matching modules: {matches!r}')
  316. raise RuntimeError('Multiple matching modules for input URL')
  317. if matches:
  318. _logger.info(f'Selecting module {matches[0].__module__}.{matches[0].__name__}')
  319. return matches[0]
  320. raise RuntimeError('No matching modules for input URL')
  321. def get_module_instance(inputUrl: InputURL, **kwargs) -> Module:
  322. '''Get an instance of the Module class most suitable for handling `inputUrl`.'''
  323. return get_module_class(inputUrl)(inputUrl, **kwargs)