Archiving community contributions on YouTube: unpublished captions, title and description translations, and caption credits

# This function adapted from https://github.com/cdown/srt/blob/11089f1e021f2e074d04c33fc7ffc4b7b52e7045/srt.py, lines 69 and 189 (MIT License)
def timedelta_to_sbv_timestamp(timedelta_timestamp):
    r"""
    Convert a :py:class:`~datetime.timedelta` to an SBV timestamp.

    .. doctest::

        >>> import datetime
        >>> delta = datetime.timedelta(hours=1, minutes=23, seconds=4)
        >>> timedelta_to_sbv_timestamp(delta)
        '1:23:04.000'

    :param datetime.timedelta timedelta_timestamp: A timedelta to convert to an
                                                    SBV timestamp
    :returns: The timestamp in SBV format
    :rtype: str
    """

    SECONDS_IN_HOUR = 3600
    SECONDS_IN_MINUTE = 60
    HOURS_IN_DAY = 24
    MICROSECONDS_IN_MILLISECOND = 1000

    hrs, secs_remainder = divmod(timedelta_timestamp.seconds, SECONDS_IN_HOUR)
    hrs += timedelta_timestamp.days * HOURS_IN_DAY
    mins, secs = divmod(secs_remainder, SECONDS_IN_MINUTE)
    msecs = timedelta_timestamp.microseconds // MICROSECONDS_IN_MILLISECOND
    return "%1d:%02d:%02d.%03d" % (hrs, mins, secs, msecs)

from datetime import timedelta
from json import dumps
from gc import collect

import requests

# https://docs.python.org/3/library/html.parser.html
from html.parser import HTMLParser

class MyHTMLParser(HTMLParser):
    # Collects caption cues, the translated title, and the translated
    # description from a timedtext_editor page.
    def __init__(self):
        HTMLParser.__init__(self)
        self.captions = []
        self.title = ""
        self.description = ""

    def check_attr(self, attrs, attr, value):
        # True if the tag carries attribute `attr` with exactly the value `value`.
        for item in attrs:
            if item[0] == attr and item[1] == value:
                return True
        return False

    def get_attr(self, attrs, attr):
        # Return the value of attribute `attr`, or False if it is absent.
        for item in attrs:
            if item[0] == attr:
                return item[1]
        return False

    def handle_starttag(self, tag, attrs):
        if tag == "input" and self.check_attr(attrs, "class", "yt-uix-form-input-text event-time-field event-start-time"):
            self.captions.append({"startTime": int(self.get_attr(attrs, "data-start-ms")), "text": ""})
        elif tag == "input" and self.check_attr(attrs, "class", "yt-uix-form-input-text event-time-field event-end-time"):
            self.captions[-1]["endTime"] = int(self.get_attr(attrs, "data-end-ms"))
        elif tag == "input" and self.check_attr(attrs, "id", "metadata-title"):
            self.title = self.get_attr(attrs, "value")

    def handle_data(self, data):
        if self.get_starttag_text() and self.get_starttag_text().startswith("<textarea "):
            if 'name="serve_text"' in self.get_starttag_text():
                self.captions[-1]["text"] += data
            elif 'id="metadata-description"' in self.get_starttag_text():
                self.description += data

def subprrun(jobs, mysession):
    # Worker: pulls (langcode, video id, mode) jobs off the queue, fetches the
    # timedtext_editor page and writes any captions (.sbv) and metadata (.json)
    # it finds to out/<video id>/.
    while not jobs.empty():
        collect()  # cleanup memory
        langcode, vid, mode = jobs.get()
        vid = vid.strip()
        print(langcode, vid)

        if mode == "default":
            pparams = (
                ("v", vid),
                ("lang", langcode),
                ("action_mde_edit_form", 1),
                ("bl", "vmp"),
                ("ui", "hd"),
                ("tab", "captions"),
                ("o", "U")
            )
            page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
        elif mode == "forceedit-metadata":
            pparams = (
                ("v", vid),
                ("lang", langcode),
                ("action_mde_edit_form", 1),
                ("forceedit", "metadata"),
                ("tab", "metadata")
            )
            page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
        elif mode == "forceedit-captions":
            pparams = (
                ("v", vid),
                ("lang", langcode),
                ("action_mde_edit_form", 1),
                ("bl", "vmp"),
                ("ui", "hd"),
                ("forceedit", "captions"),
                ("tab", "captions"),
                ("o", "U")
            )
            page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)

        assert "accounts.google.com" not in page.url, "Please supply authentication cookie information in config.json. See README.md for more information."

        inttext = page.text
        del page

        filestring = "_community"
        if '<li id="captions-editor-nav-captions" role="tab" data-state="published" class="published">' in inttext:
            filestring = "_published"
        if mode == "forceedit-captions":
            filestring = "_community_revised"

        # In default mode, queue follow-up jobs to also capture the latest
        # (force-edit) revision of the metadata and captions when the page
        # indicates one exists.
        if "forceedit" not in mode:
            if '&amp;forceedit=metadata&amp;tab=metadata">See latest</a>' in inttext:
                jobs.put((langcode, vid, "forceedit-metadata"))
            if '<li id="captions-editor-nav-captions" role="tab" data-state="published" class="published">' in inttext:
                jobs.put((langcode, vid, "forceedit-captions"))

        if 'id="reject-captions-button"' in inttext or 'id="reject-metadata-button"' in inttext or 'data-state="published"' in inttext or 'title="The video owner already provided subtitles/CC"' in inttext:  # quick way of checking if this page is worth parsing
            parser = MyHTMLParser()
            parser.feed(inttext)

            captiontext = False
            for item in parser.captions:
                if item["text"][:-9]:
                    captiontext = True

            if captiontext and (mode == "default" or mode == "forceedit-captions"):
                myfs = open("out/"+vid+"/"+vid+"_"+langcode+filestring+".sbv", "w", encoding="utf-8")
                captions = parser.captions
                captions.pop(0)  # get rid of the fake one
                while captions:
                    item = captions.pop(0)
                    myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=item["startTime"])) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=item["endTime"])) + "\n" + item["text"][:-9] + "\n")
                    del item
                    if captions:
                        myfs.write("\n")
                del captions
                myfs.close()
                del myfs

            del captiontext

            if parser.title or parser.description[:-16] and (mode == "default" or mode == "forceedit-metadata"):
                metadata = {}
                metadata["title"] = parser.title
                if metadata["title"] is False:
                    metadata["title"] = ""
                metadata["description"] = parser.description[:-16]

                filestring = "_community"
                if '<li id="captions-editor-nav-metadata" role="tab" data-state="published" class="published">' in inttext:
                    filestring = "_published"
                if mode == "forceedit-metadata":
                    filestring = "_community_revised"

                open("out/"+vid+"/"+vid+"_"+langcode+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
                del metadata

        del inttext
        del langcode
        del vid
        del pparams

        jobs.task_done()
    return True

if __name__ == "__main__":
    from os import environ, mkdir
    from os.path import isfile
    from json import loads

    # HSID, SSID, SID cookies required
    if "HSID" in environ.keys() and "SSID" in environ.keys() and "SID" in environ.keys():
        cookies = {"HSID": environ["HSID"], "SSID": environ["SSID"], "SID": environ["SID"]}
    elif isfile("config.json"):
        cookies = loads(open("config.json").read())
    else:
        print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
        assert False

    if not (cookies["HSID"] and cookies["SSID"] and cookies["SID"]):
        print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
        assert False

    mysession = requests.session()
    mysession.headers.update({"cookie": "HSID="+cookies["HSID"]+"; SSID="+cookies["SSID"]+"; SID="+cookies["SID"], "Accept-Language": "en-US",})
    del cookies

    from sys import argv
    from queue import Queue
    from threading import Thread

    langs = ['ab', 'aa', 'af', 'sq', 'ase', 'am', 'ar', 'arc', 'hy', 'as', 'ay', 'az', 'bn', 'ba', 'eu', 'be', 'bh', 'bi', 'bs', 'br',
             'bg', 'yue', 'yue-HK', 'ca', 'chr', 'zh-CN', 'zh-HK', 'zh-Hans', 'zh-SG', 'zh-TW', 'zh-Hant', 'cho', 'co', 'hr', 'cs', 'da', 'nl',
             'nl-BE', 'nl-NL', 'dz', 'en', 'en-CA', 'en-IN', 'en-IE', 'en-GB', 'en-US', 'eo', 'et', 'fo', 'fj', 'fil', 'fi', 'fr', 'fr-BE',
             'fr-CA', 'fr-FR', 'fr-CH', 'ff', 'gl', 'ka', 'de', 'de-AT', 'de-DE', 'de-CH', 'el', 'kl', 'gn', 'gu', 'ht', 'hak', 'hak-TW', 'ha',
             'iw', 'hi', 'hi-Latn', 'ho', 'hu', 'is', 'ig', 'id', 'ia', 'ie', 'iu', 'ik', 'ga', 'it', 'ja', 'jv', 'kn', 'ks', 'kk', 'km', 'rw',
             'tlh', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mni', 'mi', 'mr', 'mas', 'nan',
             'nan-TW', 'lus', 'mo', 'mn', 'my', 'na', 'nv', 'ne', 'no', 'oc', 'or', 'om', 'ps', 'fa', 'fa-AF', 'fa-IR', 'pl', 'pt', 'pt-BR',
             'pt-PT', 'pa', 'qu', 'ro', 'rm', 'rn', 'ru', 'ru-Latn', 'sm', 'sg', 'sa', 'sc', 'gd', 'sr', 'sr-Cyrl', 'sr-Latn', 'sh', 'sdp', 'sn',
             'scn', 'sd', 'si', 'sk', 'sl', 'so', 'st', 'es', 'es-419', 'es-MX', 'es-ES', 'es-US', 'su', 'sw', 'ss', 'sv', 'tl', 'tg', 'ta',
             'tt', 'te', 'th', 'bo', 'ti', 'tpi', 'to', 'ts', 'tn', 'tr', 'tk', 'tw', 'uk', 'ur', 'uz', 'vi', 'vo', 'vor', 'cy', 'fy', 'wo',
             'xh', 'yi', 'yo', 'zu']

    vidl = argv
    vidl.pop(0)

    try:
        mkdir("out")
    except:
        pass

    jobs = Queue()
    for video in vidl:
        try:
            mkdir("out/"+video.strip())
        except:
            pass
        for lang in langs:
            jobs.put((lang, video, "default"))

    subthreads = []

    for r in range(50):
        subrunthread = Thread(target=subprrun, args=(jobs, mysession))
        subrunthread.start()
        subthreads.append(subrunthread)
        del subrunthread
    # Join every worker thread; mutating subthreads while iterating over it
    # could skip threads and let the script end before the last one finished.
    for xa in subthreads:
        xa.join()
    del subthreads
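
Usage note: the __main__ block above takes video IDs as command-line arguments and reads the HSID, SSID, and SID cookies either from environment variables of the same names or from a config.json in the working directory. A minimal sketch of producing that file, with placeholder values standing in for real youtube.com cookies:

# Sketch only: writes a config.json in the flat shape the script expects.
# The three values below are placeholders, not real credentials.
import json

with open("config.json", "w", encoding="utf-8") as f:
    json.dump({"HSID": "<HSID cookie>", "SSID": "<SSID cookie>", "SID": "<SID cookie>"}, f)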
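
For reference, a small illustration of what MyHTMLParser pulls out of a page. It is fed a hypothetical HTML fragment that only mimics the attributes the parser checks for (the real timedtext_editor markup is assumed, not reproduced), it expects the class defined above to be in scope, and it shows the raw parser output before subprrun pops the fake first cue and trims the caption text:

# Hypothetical fragment carrying the attributes handle_starttag/handle_data look for.
sample = (
    '<input class="yt-uix-form-input-text event-time-field event-start-time" data-start-ms="0">'
    '<input class="yt-uix-form-input-text event-time-field event-end-time" data-end-ms="1500">'
    '<textarea name="serve_text">Hello world</textarea>'
    '<input id="metadata-title" value="Example title">'
)
demo = MyHTMLParser()
demo.feed(sample)
print(demo.captions)  # [{'startTime': 0, 'text': 'Hello world', 'endTime': 1500}]
print(demo.title)     # Example title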