Archiving community contributions on YouTube: unpublished captions, title and description translations, and caption credits
from threading import Thread
from queue import Queue, Empty
from time import sleep
from os import mkdir, rmdir, listdir, system, environ
from os.path import isdir, isfile, getsize
from shutil import make_archive, rmtree
from json import dumps, loads
from gc import collect
import signal

import requests
from youtube_dl import YoutubeDL

import tracker
from discovery import getmetadata
from export import subprrun
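
# Local helper modules; their interfaces are assumed from how they are used below:
#   tracker   - work-queue client: request_item_from_tracker(), add_item_to_tracker(),
#               request_upload_target(), mark_item_as_done(), plus the ItemType enum
#   discovery - getmetadata(video_id) -> tuple; the indices used below suggest
#               (ccenabled, creditdata, video IDs, channel IDs, mix IDs, playlist IDs)
#   export    - subprrun(job_queue, session) exports subtitles for queued jobs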

batchcontent = []

def batchfunc():
    # Pull items from the tracker until the shared batch holds ~500 entries.
    while len(batchcontent) < 500:
        batchcontent.append(tracker.request_item_from_tracker())

def submitfunc(submitqueue):
    # Drain a queue of (item type, item value) discoveries back to the tracker.
    # Non-blocking get: with 50 workers sharing the queue, an empty() check
    # followed by a blocking get() could hang a thread forever.
    while True:
        try:
            itype, ival = submitqueue.get(block=False)
        except Empty:
            break
        tracker.add_item_to_tracker(itype, ival)
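
# Note: batchfunc runs on 50 threads at once and its len() check is not atomic,
# so a batch can slightly overshoot 500 items; the tracker is assumed to tolerate that.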

WORKER_VERSION = 1
SERVER_BASE_URL = "http://localhost:5000"

langs = ['ab', 'aa', 'af', 'sq', 'ase', 'am', 'ar', 'arc', 'hy', 'as', 'ay', 'az', 'bn', 'ba', 'eu', 'be', 'bh', 'bi', 'bs', 'br',
         'bg', 'yue', 'yue-HK', 'ca', 'chr', 'zh-CN', 'zh-HK', 'zh-Hans', 'zh-SG', 'zh-TW', 'zh-Hant', 'cho', 'co', 'hr', 'cs', 'da', 'nl',
         'nl-BE', 'nl-NL', 'dz', 'en', 'en-CA', 'en-IN', 'en-IE', 'en-GB', 'en-US', 'eo', 'et', 'fo', 'fj', 'fil', 'fi', 'fr', 'fr-BE',
         'fr-CA', 'fr-FR', 'fr-CH', 'ff', 'gl', 'ka', 'de', 'de-AT', 'de-DE', 'de-CH', 'el', 'kl', 'gn', 'gu', 'ht', 'hak', 'hak-TW', 'ha',
         'iw', 'hi', 'hi-Latn', 'ho', 'hu', 'is', 'ig', 'id', 'ia', 'ie', 'iu', 'ik', 'ga', 'it', 'ja', 'jv', 'kn', 'ks', 'kk', 'km', 'rw',
         'tlh', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mni', 'mi', 'mr', 'mas', 'nan',
         'nan-TW', 'lus', 'mo', 'mn', 'my', 'na', 'nv', 'ne', 'no', 'oc', 'or', 'om', 'ps', 'fa', 'fa-AF', 'fa-IR', 'pl', 'pt', 'pt-BR',
         'pt-PT', 'pa', 'qu', 'ro', 'rm', 'rn', 'ru', 'ru-Latn', 'sm', 'sg', 'sa', 'sc', 'gd', 'sr', 'sr-Cyrl', 'sr-Latn', 'sh', 'sdp', 'sn',
         'scn', 'sd', 'si', 'sk', 'sl', 'so', 'st', 'es', 'es-419', 'es-MX', 'es-ES', 'es-US', 'su', 'sw', 'ss', 'sv', 'tl', 'tg', 'ta',
         'tt', 'te', 'th', 'bo', 'ti', 'tpi', 'to', 'ts', 'tn', 'tr', 'tk', 'tw', 'uk', 'ur', 'uz', 'vi', 'vo', 'vor', 'cy', 'fy', 'wo',
         'xh', 'yi', 'yo', 'zu']
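
# Every language above is tried for every caption-enabled video (see the subtitle
# job fan-out near the end of this script), since unpublished community drafts can
# exist in any language regardless of the video's own language.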

#useful Queue example: https://stackoverflow.com/a/54658363
jobs = Queue()

ccenabledl = []
recvids = set()
recchans = set()
recmixes = set()
recplayl = set()

#HSID, SSID, SID cookies required
if "HSID" in environ and "SSID" in environ and "SID" in environ:
    cookies = {"HSID": environ["HSID"], "SSID": environ["SSID"], "SID": environ["SID"]}
elif isfile("config.json"):
    cookies = loads(open("config.json").read())
else:
    print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
    raise SystemExit(1)

if not (cookies.get("HSID") and cookies.get("SSID") and cookies.get("SID")):
    print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
    raise SystemExit(1)

mysession = requests.session()
mysession.headers.update({"cookie": "HSID="+cookies["HSID"]+"; SSID="+cookies["SSID"]+"; SID="+cookies["SID"], "Accept-Language": "en-US",})
del cookies
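
# A logged-in YouTube session; these cookies are presumably what grant access to
# unpublished community drafts and caption credit data.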

#Graceful Shutdown
class GracefulKiller:
    kill_now = False

    def __init__(self):
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        self.kill_now = True

gkiller = GracefulKiller()
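
# The main loop below checks gkiller.kill_now once per batch, so SIGINT/SIGTERM
# lets the current batch finish and upload instead of dying mid-item.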

def prrun():
    global recvids
    global recchans
    global recmixes
    global recplayl
    global ccenabledl
    while True:
        try:
            # Non-blocking get: with 50 workers sharing the queue, an empty()
            # check followed by a blocking get() could hang a thread forever.
            item = jobs.get(block=False)
        except Empty:
            break
        print("Video ID:", str(item).strip())

        while True:
            try:
                info = getmetadata(str(item).strip())
                break
            except BaseException as e:
                print(e)
                print("Error in retrieving information, waiting 30 seconds")
                #raise
                sleep(30)

        ydl = YoutubeDL({"extract_flat": "in_playlist", "simulate": True, "skip_download": True, "quiet": True})

        # Expand newly discovered channels and playlists into video IDs.
        for chaninfo in info[3]:
            if chaninfo not in recchans:
                while True:
                    try:
                        y = ydl.extract_info("https://www.youtube.com/channel/"+chaninfo, download=False)
                        break
                    except Exception:
                        sleep(30)
                sleep(5) #prevent error 429
                for itemyv in y["entries"]:
                    recvids.add(itemyv["id"])

        for playlinfo in info[5]:
            if playlinfo not in recplayl:
                while True:
                    try:
                        y = ydl.extract_info("https://www.youtube.com/playlist?list="+playlinfo, download=False)
                        break
                    except Exception:
                        sleep(30)
                sleep(5) #prevent error 429
                for itemyvp in y["entries"]:
                    recvids.add(itemyvp["id"])

        # Record everything discovered for this video.
        recvids.update(info[2])
        recchans.update(info[3])
        recmixes.update(info[4])
        recplayl.update(info[5])

        if info[0] or info[1]: # ccenabled or creditdata
            if not isdir("out/"+str(item).strip()):
                mkdir("out/"+str(item).strip())

        if info[1]: # creditdata
            with open("out/"+str(item).strip()+"/"+str(item).strip()+"_published_credits.json", "w") as f:
                f.write(dumps(info[1]))

        if info[0]: #ccenabled
            ccenabledl.append(item)
        jobs.task_done()
    return True
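
# Main worker loop: fetch a batch from the tracker, process each video, report
# discoveries, export subtitles, then zip, upload, and mark the batch done.
# Repeats until a shutdown signal is received.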
while not gkiller.kill_now:
    collect() #cleanup

    try:
        mkdir("out")
    except FileExistsError:
        pass

    batchcontent.clear()

    # Request a batch of items from the tracker on 50 threads.
    batchthreads = []
    for r in range(50):
        batchrunthread = Thread(target=batchfunc)
        batchrunthread.start()
        batchthreads.append(batchrunthread)
    for xc in batchthreads:
        # Join without removing from the list mid-iteration; the old remove()
        # skipped every other thread and once let the loop continue before a
        # thread had finished.
        xc.join()

    # Only video items are processed here; anything else is skipped for now.
    for desit in batchcontent:
        if desit and desit.split(":", 1)[0] == "video":
            jobs.put(desit.split(":", 1)[1])
        else:
            print("Ignoring item for now", desit)

    # Process the queued videos on 50 threads.
    threads = []
    for i in range(50):
        runthread = Thread(target=prrun)
        runthread.start()
        threads.append(runthread)
    for x in threads:
        x.join()
  160. print("Sending discoveries to tracker...")
  161. submitjobs = Queue()
  162. #don't send channels and playlists as those have already been converted for video IDs
  163. #IDK how to handle mixes so send them for now
  164. print(len(recvids))
  165. for itemvid in recvids:
  166. submitjobs.put((tracker.ItemType.Video, itemvid))
  167. print(len(recmixes))
  168. for itemmix in recmixes:
  169. submitjobs.put((tracker.ItemType.MixPlaylist, itemmix))
  170. #open("out/discoveries.json", "w").write(dumps({"recvids": sorted(recvids), "recchans": sorted(recchans), "recmixes": sorted(recmixes), "recplayl": sorted(recplayl)}))
  171. #clear
  172. recvids.clear()
  173. recchans.clear()
  174. recmixes.clear()
  175. recplayl.clear()
  176. submitthreads = []
  177. for r in range(50):
  178. submitrunthread = Thread(target=submitfunc, args=(submitjobs,))
  179. submitrunthread.start()
  180. submitthreads.append(submitrunthread)
  181. del submitrunthread
  182. for xb in submitthreads:
  183. xb.join() #bug (occurred once: the script ended before the last thread finished)
  184. submitthreads.remove(xb)
  185. del xb
  186. sleep(1)

    # Fan each caption-enabled video out into one subtitle job per language.
    subtjobs = Queue()
    while ccenabledl:
        langcontent = langs.copy()
        intvid = ccenabledl.pop(0)
        while langcontent:
            subtjobs.put((langcontent.pop(0), intvid, "default"))
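
    # subprrun (from export) is assumed to consume (language, video ID, "default")
    # jobs from this queue, fetching subtitles with the authenticated session and
    # writing them into out/<video ID>/.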
    subthreads = []
    for r in range(50):
        subrunthread = Thread(target=subprrun, args=(subtjobs, mysession))
        subrunthread.start()
        subthreads.append(subrunthread)
    for xa in subthreads:
        xa.join() # join without mutating the list (see above)
    sleep(1) #wait a second to hopefully allow the other threads to finish

    # Remove empty per-video folders; rmdir only succeeds on empty directories,
    # so folders that received data are left alone.
    for fol in listdir("out"):
        try:
            if isdir("out/"+fol):
                rmdir("out/"+fol)
        except OSError:
            pass

    #https://stackoverflow.com/a/11968881
    # TODO: put the data somewhere...
    # TODO: put the discoveries somewhere...

    # Zip each remaining per-video folder.
    for fol in listdir("out"):
        if isdir("out/"+fol):
            make_archive("out/"+fol, "zip", "out/"+fol)

    # Ask the tracker for an upload target, retrying every 5 minutes until one is given.
    targetloc = tracker.request_upload_target()
    while not targetloc:
        print("Waiting 5 minutes...")
        sleep(300)
        targetloc = tracker.request_upload_target()

    # Upload each zip to the assigned target (rsync or HTTP POST).
    for zipf in listdir("out"):
        if isfile("out/"+zipf) and zipf.endswith(".zip"):
            if targetloc.startswith("rsync"):
                system("rsync out/"+zipf+" "+targetloc)
            elif targetloc.startswith("http"):
                with open("out/"+zipf, "rb") as upzipf:
                    requests.post(targetloc, data=upzipf)

    # Report each item in the batch as done, with the size of its zip (0 if none).
    for itemb in batchcontent:
        if not itemb:
            continue # empty items were never queued, so there is nothing to report
        if isfile("out/"+itemb.split(":", 1)[1]+".zip"):
            size = getsize("out/"+itemb.split(":", 1)[1]+".zip")
        else:
            size = 0
        tracker.mark_item_as_done(itemb, size)

    # Clear the output directory for the next batch.
    rmtree("out")