diff --git a/export.py b/export.py
index 309f010..76053dd 100644
--- a/export.py
+++ b/export.py
@@ -79,206 +79,202 @@ class MyHTMLParser(HTMLParser):
elif self.get_starttag_text() and self.get_starttag_text().startswith('
' in inttext:
- filestring = "_community_published"
+ try:
+ initlang = page.text.split("'metadataLanguage': \"", 1)[1].split('"', 1)[0]
+ except IndexError: # marker string not present in the page source
+ initlang = ""
- if mode == "forceedit-captions":
- filestring = "_community_draft"
+ del page
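+ # filestring selects the output filename suffix; start from the draft default and upgrade below when the page shows a published track.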
- if 'title="The video owner already provided subtitles/CC"' in inttext:
- filestring = "_uploader_provided"
+ filestring = "_community_draft"
+
+ if '' in inttext:
+ filestring = "_community_published"
- if not "forceedit" in mode:
- if '&forceedit=metadata&tab=metadata">See latest' in inttext:
- jobs.put((langcode, vid, "forceedit-metadata"))
-
- if '' in inttext:
- jobs.put((langcode, vid, "forceedit-captions"))
-
- if 'id="reject-captions-button"' in inttext or 'id="reject-metadata-button"' in inttext or 'data-state="published"' in inttext or 'title="The video owner already provided subtitles/CC"' in inttext: #quick way of checking if this page is worth parsing
- parser = MyHTMLParser()
- parser.feed(inttext)
-
- captiontext = False
- for item in parser.captions:
- if item["text"][:-9]:
- captiontext = True
-
- if captiontext and (mode == "default" or mode == "forceedit-captions"):
- myfs = open("out/"+vid+"/"+vid+"_"+langcode+filestring+".sbv", "w", encoding="utf-8")
- captions = parser.captions
- captions.pop(0) #get rid of the fake one
- while captions:
- item = captions.pop(0)
-
- myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=item["startTime"])) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=item["endTime"])) + "\n" + item["text"][:-9] + "\n")
-
- del item
- if captions:
- myfs.write("\n")
- del captions
- myfs.close()
- del myfs
-
- del captiontext
-
- if (parser.title or parser.description[:-16]) and (mode == "default" or mode == "forceedit-metadata"):
- metadata = {}
- metadata["title"] = parser.title
- if metadata["title"] == False:
- metadata["title"] = ""
- metadata["description"] = parser.description[:-16]
+ if mode == "forceedit-captions":
+ filestring = "_community_draft"
- filestring = "_community_draft"
- if '' in inttext:
- filestring = "_community_published"
+ if 'title="The video owner already provided subtitles/CC"' in inttext:
+ filestring = "_uploader_provided"
- if mode == "forceedit-metadata":
- filestring = "_community_draft"
- open("out/"+vid+"/"+vid+"_"+langcode+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
- del metadata
+ if not "forceedit" in mode:
+ if '&forceedit=metadata&tab=metadata">See latest' in inttext:
+ jobs.put(("subtitles-forceedit-metadata", vid, langcode))
- if (parser.inittitle[9:-17] or parser.initdescription) and (mode == "default" or mode == "forceedit-metadata" and initlang):
- metadata = {}
- metadata["title"] = parser.inittitle[9:-17]
- if metadata["title"] == False:
- metadata["title"] = ""
- metadata["description"] = parser.initdescription
+ if '' in inttext:
+ jobs.put(("subtitles-forceedit-captions", vid, langcode))
+
+ if 'id="reject-captions-button"' in inttext or 'id="reject-metadata-button"' in inttext or 'data-state="published"' in inttext or 'title="The video owner already provided subtitles/CC"' in inttext: #quick way of checking if this page is worth parsing
+ parser = MyHTMLParser()
+ parser.feed(inttext)
+
+ captiontext = False
+ for item in parser.captions:
+ if item["text"][:-9]:
+ captiontext = True
+
+ if captiontext and (mode == "default" or mode == "forceedit-captions"):
+ myfs = open("out/"+vid+"/"+vid+"_"+langcode+filestring+".sbv", "w", encoding="utf-8")
+ captions = parser.captions
+ captions.pop(0) #get rid of the fake one
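+ # SBV output: a "start,end" timestamp line, then the caption text, with a blank line between cues.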
+ while captions:
+ item = captions.pop(0)
+
+ myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=item["startTime"])) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=item["endTime"])) + "\n" + item["text"][:-9] + "\n")
+
+ del item
+ if captions:
+ myfs.write("\n")
+ del captions
+ myfs.close()
+ del myfs
+
+ del captiontext
+
+ if (parser.title or parser.description[:-16]) and (mode == "default" or mode == "forceedit-metadata"):
+ metadata = {}
+ metadata["title"] = parser.title
+ if metadata["title"] == False:
+ metadata["title"] = ""
+ metadata["description"] = parser.description[:-16]
- filestring = "_uploader_provided"
- open("out/"+vid+"/"+vid+"_"+initlang+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
- del metadata
+ filestring = "_community_draft"
+ if '' in inttext:
+ filestring = "_community_published"
- del inttext
+ if mode == "forceedit-metadata":
+ filestring = "_community_draft"
+ open("out/"+vid+"/"+vid+"_"+langcode+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
+ del metadata
- del langcode
- del vid
- del pparams
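+ # inittitle/initdescription hold the pre-existing uploader metadata scraped by MyHTMLParser; the slice offsets appear to trim fixed wrapper text captured around the raw value.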
+ if (parser.inittitle[9:-17] or parser.initdescription) and (mode == "default" or mode == "forceedit-metadata" and initlang):
+ metadata = {}
+ metadata["title"] = parser.inittitle[9:-17]
+ if metadata["title"] == False:
+ metadata["title"] = ""
+ metadata["description"] = parser.initdescription
- jobs.task_done()
+ filestring = "_uploader_provided"
+ open("out/"+vid+"/"+vid+"_"+initlang+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
+ del metadata
- return True
+ del inttext
-if __name__ == "__main__":
- from os import environ, mkdir
- from os.path import isfile
- from json import loads
- #HSID, SSID, SID cookies required
- if "HSID" in environ.keys() and "SSID" in environ.keys() and "SID" in environ.keys():
- cookies = {"HSID": environ["HSID"], "SSID": environ["SSID"], "SID": environ["SID"]}
- elif isfile("config.json"):
- cookies = loads(open("config.json").read())
- else:
- print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
- assert False
- if not (cookies["HSID"] and cookies["SSID"] and cookies["SID"]):
- print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
- assert False
-
- mysession = requests.session()
- mysession.headers.update({"cookie": "HSID="+cookies["HSID"]+"; SSID="+cookies["SSID"]+"; SID="+cookies["SID"], "Accept-Language": "en-US",})
- del cookies
- from sys import argv
- from queue import Queue
- from threading import Thread
- langs = ['ab', 'aa', 'af', 'sq', 'ase', 'am', 'ar', 'arc', 'hy', 'as', 'ay', 'az', 'bn', 'ba', 'eu', 'be', 'bh', 'bi', 'bs', 'br',
- 'bg', 'yue', 'yue-HK', 'ca', 'chr', 'zh-CN', 'zh-HK', 'zh-Hans', 'zh-SG', 'zh-TW', 'zh-Hant', 'cho', 'co', 'hr', 'cs', 'da', 'nl',
- 'nl-BE', 'nl-NL', 'dz', 'en', 'en-CA', 'en-IN', 'en-IE', 'en-GB', 'en-US', 'eo', 'et', 'fo', 'fj', 'fil', 'fi', 'fr', 'fr-BE',
- 'fr-CA', 'fr-FR', 'fr-CH', 'ff', 'gl', 'ka', 'de', 'de-AT', 'de-DE', 'de-CH', 'el', 'kl', 'gn', 'gu', 'ht', 'hak', 'hak-TW', 'ha',
- 'iw', 'hi', 'hi-Latn', 'ho', 'hu', 'is', 'ig', 'id', 'ia', 'ie', 'iu', 'ik', 'ga', 'it', 'ja', 'jv', 'kn', 'ks', 'kk', 'km', 'rw',
- 'tlh', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mni', 'mi', 'mr', 'mas', 'nan',
- 'nan-TW', 'lus', 'mo', 'mn', 'my', 'na', 'nv', 'ne', 'no', 'oc', 'or', 'om', 'ps', 'fa', 'fa-AF', 'fa-IR', 'pl', 'pt', 'pt-BR',
- 'pt-PT', 'pa', 'qu', 'ro', 'rm', 'rn', 'ru', 'ru-Latn', 'sm', 'sg', 'sa', 'sc', 'gd', 'sr', 'sr-Cyrl', 'sr-Latn', 'sh', 'sdp', 'sn',
- 'scn', 'sd', 'si', 'sk', 'sl', 'so', 'st', 'es', 'es-419', 'es-MX', 'es-ES', 'es-US', 'su', 'sw', 'ss', 'sv', 'tl', 'tg', 'ta',
- 'tt', 'te', 'th', 'bo', 'ti', 'tpi', 'to', 'ts', 'tn', 'tr', 'tk', 'tw', 'uk', 'ur', 'uz', 'vi', 'vo', 'vor', 'cy', 'fy', 'wo',
- 'xh', 'yi', 'yo', 'zu']
- vidl = argv
- vidl.pop(0)
+ del langcode
+ del vid
+ del pparams
- try:
- mkdir("out")
- except:
- pass
-
- jobs = Queue()
- for video in vidl:
- try:
- mkdir("out/"+video.strip())
- except:
- pass
- for lang in langs:
- jobs.put((lang, video, "default"))
-
- subthreads = []
-
- for r in range(50):
- subrunthread = Thread(target=subprrun, args=(jobs,mysession))
- subrunthread.start()
- subthreads.append(subrunthread)
- del subrunthread
-
- for xa in subthreads:
- xa.join() #bug (occurred once: the script ended before the last thread finished)
- subthreads.remove(xa)
- del xa
\ No newline at end of file
+ return True
+
+# if __name__ == "__main__":
+# from os import environ, mkdir
+# from os.path import isfile
+# from json import loads
+# #HSID, SSID, SID cookies required
+# if "HSID" in environ.keys() and "SSID" in environ.keys() and "SID" in environ.keys():
+# cookies = {"HSID": environ["HSID"], "SSID": environ["SSID"], "SID": environ["SID"]}
+# elif isfile("config.json"):
+# cookies = loads(open("config.json").read())
+# else:
+# print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
+# assert False
+# if not (cookies["HSID"] and cookies["SSID"] and cookies["SID"]):
+# print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
+# assert False
+
+# mysession = requests.session()
+# mysession.headers.update({"cookie": "HSID="+cookies["HSID"]+"; SSID="+cookies["SSID"]+"; SID="+cookies["SID"], "Accept-Language": "en-US",})
+# del cookies
+# from sys import argv
+# from queue import Queue
+# from threading import Thread
+# langs = ['ab', 'aa', 'af', 'sq', 'ase', 'am', 'ar', 'arc', 'hy', 'as', 'ay', 'az', 'bn', 'ba', 'eu', 'be', 'bh', 'bi', 'bs', 'br',
+# 'bg', 'yue', 'yue-HK', 'ca', 'chr', 'zh-CN', 'zh-HK', 'zh-Hans', 'zh-SG', 'zh-TW', 'zh-Hant', 'cho', 'co', 'hr', 'cs', 'da', 'nl',
+# 'nl-BE', 'nl-NL', 'dz', 'en', 'en-CA', 'en-IN', 'en-IE', 'en-GB', 'en-US', 'eo', 'et', 'fo', 'fj', 'fil', 'fi', 'fr', 'fr-BE',
+# 'fr-CA', 'fr-FR', 'fr-CH', 'ff', 'gl', 'ka', 'de', 'de-AT', 'de-DE', 'de-CH', 'el', 'kl', 'gn', 'gu', 'ht', 'hak', 'hak-TW', 'ha',
+# 'iw', 'hi', 'hi-Latn', 'ho', 'hu', 'is', 'ig', 'id', 'ia', 'ie', 'iu', 'ik', 'ga', 'it', 'ja', 'jv', 'kn', 'ks', 'kk', 'km', 'rw',
+# 'tlh', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mni', 'mi', 'mr', 'mas', 'nan',
+# 'nan-TW', 'lus', 'mo', 'mn', 'my', 'na', 'nv', 'ne', 'no', 'oc', 'or', 'om', 'ps', 'fa', 'fa-AF', 'fa-IR', 'pl', 'pt', 'pt-BR',
+# 'pt-PT', 'pa', 'qu', 'ro', 'rm', 'rn', 'ru', 'ru-Latn', 'sm', 'sg', 'sa', 'sc', 'gd', 'sr', 'sr-Cyrl', 'sr-Latn', 'sh', 'sdp', 'sn',
+# 'scn', 'sd', 'si', 'sk', 'sl', 'so', 'st', 'es', 'es-419', 'es-MX', 'es-ES', 'es-US', 'su', 'sw', 'ss', 'sv', 'tl', 'tg', 'ta',
+# 'tt', 'te', 'th', 'bo', 'ti', 'tpi', 'to', 'ts', 'tn', 'tr', 'tk', 'tw', 'uk', 'ur', 'uz', 'vi', 'vo', 'vor', 'cy', 'fy', 'wo',
+# 'xh', 'yi', 'yo', 'zu']
+# vidl = argv
+# vidl.pop(0)
+
+# try:
+# mkdir("out")
+# except:
+# pass
+
+# jobs = Queue()
+# for video in vidl:
+# try:
+# mkdir("out/"+video.strip())
+# except:
+# pass
+# for lang in langs:
+# jobs.put((lang, video, "default"))
+
+# subthreads = []
+
+# for r in range(50):
+# subrunthread = Thread(target=subprrun, args=(jobs,mysession))
+# subrunthread.start()
+# subthreads.append(subrunthread)
+# del subrunthread
+
+# for xa in subthreads:
+# xa.join() #bug (occurred once: the script ended before the last thread finished)
+# subthreads.remove(xa)
+# del xa
\ No newline at end of file
diff --git a/worker.py b/worker.py
index 08b0915..5be8086 100644
--- a/worker.py
+++ b/worker.py
@@ -87,6 +87,8 @@ class GracefulKiller:
gkiller = GracefulKiller()
+#TODO: discoveries, zipping, completion of subtitles
+
#minitasks
def threadrunner(jobs: Queue):
global langcnt
@@ -98,9 +100,35 @@ def threadrunner(jobs: Queue):
if task == "submitdiscovery":
tracker.add_item_to_tracker(args, vid)
elif task == "discovery":
- pass
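+ # Retry the metadata fetch every 30 seconds until it succeeds.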
+ while True:
+ try:
+ info = getmetadata(mysession, str(vid).strip())
+ break
+ except Exception as e: # Exception, not BaseException, so Ctrl-C can still kill the retry loop
+ print(e)
+ print("Error in retrieving information, waiting 30 seconds")
+ sleep(30)
+ if info[0] or info[1]: # ccenabled or creditdata
+ if not isdir("out/"+str(vid).strip()):
+ mkdir("out/"+str(vid).strip())
+ if info[1]:
+ open("out/"+str(vid).strip()+"/"+str(vid).strip()+"_published_credits.json", "w").write(dumps(info[1]))
+
+ if info[0]:
+ langcnt[vid] = 0
+ for langcode in langs:
+ jobs.put(("subtitles", vid, langcode))
+ else:
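+ # No captions and no credit data: nothing to export, so mark the video complete right away.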
+ jobs.put(("complete", None, "video:"+vid))
elif task == "subtitles":
- pass
+ subprrun(jobs, mysession, args, vid, "default")
+ langcnt[vid] += 1
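+ # 195 == len(langs); once every language for this video has run, the video itself can be completed (still TODO).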
+ if langcnt[vid] >= 195:
+ pass #complete(?)
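+ # Draft re-export jobs queued by subprrun in export.py when it detects newer community edits.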
+ elif task == "subtitles-forceedit-captions":
+ subprrun(jobs, mysession, args, vid, "forceedit-captions")
+ elif task == "subtitles-forceedit-metadata":
+ subprrun(jobs, mysession, args, vid, "forceedit-metadata")
elif task == "channel":
y = ydl.extract_info("https://www.youtube.com/channel/"+desit.split(":", 1)[1], download=False)
for itemyv in y["entries"]:
@@ -185,14 +213,7 @@ def prrun():
item = jobs.get()
print("Video ID:", str(item).strip())
- while True:
- try:
- info = getmetadata(mysession, str(item).strip())
- break
- except BaseException as e:
- print(e)
- print("Error in retrieving information, waiting 30 seconds")
- sleep(30)
+
# Add any discovered videos
recvids.update(info[2])
@@ -200,10 +221,6 @@ def prrun():
recmixes.update(info[4])
recplayl.update(info[5])
- if info[0] or info[1]: # ccenabled or creditdata
- if not isdir("out/"+str(item).strip()):
- mkdir("out/"+str(item).strip())
-
if info[1]: # creditdata
open("out/"+str(item).strip()+"/"+str(item).strip()+"_published_credits.json", "w").write(dumps(info[1]))