
More WIP

microtasks
tech234a · 3 years ago · commit fb4b423da2
2 changed files with 216 additions and 203 deletions:
  1. export.py (+185, -189)
  2. worker.py (+31, -14)

export.py (+185, -189)

@@ -79,206 +79,202 @@ class MyHTMLParser(HTMLParser):
         elif self.get_starttag_text() and self.get_starttag_text().startswith('<div id="original-video-title"'):
             self.inittitle += data
 
-def subprrun(jobs, mysession):
-    while not jobs.empty():
-        collect() #cleanup memory
-        langcode, vid, mode = jobs.get()
-        vid = vid.strip()
-        print(langcode, vid)
-
-        while True:
-            if mode == "default":
-                pparams = (
-                    ("v", vid),
-                    ("lang", langcode),
-                    ("action_mde_edit_form", 1),
-                    ("bl", "vmp"),
-                    ("ui", "hd"),
-                    ("tab", "captions"),
-                    ("o", "U")
-                )
-
-                page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
-            elif mode == "forceedit-metadata":
-                pparams = (
-                    ("v", vid),
-                    ("lang", langcode),
-                    ("action_mde_edit_form", 1),
-                    ('forceedit', 'metadata'),
-                    ('tab', 'metadata')
-                )
-
-                page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
-            elif mode == "forceedit-captions":
-                pparams = (
-                    ("v", vid),
-                    ("lang", langcode),
-                    ("action_mde_edit_form", 1),
-                    ("bl", "vmp"),
-                    ("ui", "hd"),
-                    ('forceedit', 'captions'),
-                    ("tab", "captions"),
-                    ("o", "U")
-                )
-
-                page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
-
-            if not "accounts.google.com" in page.url:
-                break
-            else:
-                print("[Retrying in 30 seconds] Please supply authentication cookie information in config.json or environment variables. See README.md for more information.")
-                sleep(30)
-
-        inttext = page.text
-
-        try:
-            initlang = page.text.split("'metadataLanguage': \"", 1)[1].split('"', 1)[0]
-        except:
-            initlang = ""
-
-        del page
-
-        filestring = "_community_draft"
-        if '<li id="captions-editor-nav-captions" role="tab" data-state="published" class="published">' in inttext:
-            filestring = "_community_published"
-
-        if mode == "forceedit-captions":
-            filestring = "_community_draft"
-
-        if 'title="The video owner already provided subtitles/CC"' in inttext:
-            filestring = "_uploader_provided"
-
-        if not "forceedit" in mode:
-            if '&amp;forceedit=metadata&amp;tab=metadata">See latest</a>' in inttext:
-                jobs.put((langcode, vid, "forceedit-metadata"))
-
-            if '<li id="captions-editor-nav-captions" role="tab" data-state="published" class="published">' in inttext:
-                jobs.put((langcode, vid, "forceedit-captions"))
-
-        if 'id="reject-captions-button"' in inttext or 'id="reject-metadata-button"' in inttext or 'data-state="published"' in inttext or 'title="The video owner already provided subtitles/CC"' in inttext: #quick way of checking if this page is worth parsing
-            parser = MyHTMLParser()
-            parser.feed(inttext)
-
-            captiontext = False
-            for item in parser.captions:
-                if item["text"][:-9]:
-                    captiontext = True
-
-            if captiontext and (mode == "default" or mode == "forceedit-captions"):
-                myfs = open("out/"+vid+"/"+vid+"_"+langcode+filestring+".sbv", "w", encoding="utf-8")
-                captions = parser.captions
-                captions.pop(0) #get rid of the fake one
-                while captions:
-                    item = captions.pop(0)
-
-                    myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=item["startTime"])) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=item["endTime"])) + "\n" + item["text"][:-9] + "\n")
-                    del item
-                    if captions:
-                        myfs.write("\n")
-                del captions
-                myfs.close()
-                del myfs
-
-            del captiontext
-
-            if (parser.title or parser.description[:-16]) and (mode == "default" or mode == "forceedit-metadata"):
-                metadata = {}
-                metadata["title"] = parser.title
-                if metadata["title"] == False:
-                    metadata["title"] = ""
-                metadata["description"] = parser.description[:-16]
-
-                filestring = "_community_draft"
-                if '<li id="captions-editor-nav-metadata" role="tab" data-state="published" class="published">' in inttext:
-                    filestring = "_community_published"
-
-                if mode == "forceedit-metadata":
-                    filestring = "_community_draft"
-                open("out/"+vid+"/"+vid+"_"+langcode+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
-                del metadata
-
-            if (parser.inittitle[9:-17] or parser.initdescription) and (mode == "default" or mode == "forceedit-metadata" and initlang):
-                metadata = {}
-                metadata["title"] = parser.inittitle[9:-17]
-                if metadata["title"] == False:
-                    metadata["title"] = ""
-                metadata["description"] = parser.initdescription
-
-                filestring = "_uploader_provided"
-                open("out/"+vid+"/"+vid+"_"+initlang+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
-                del metadata
-
-        del inttext
-
-        del langcode
-        del vid
-        del pparams
-
-        jobs.task_done()
-
-    return True
-
-if __name__ == "__main__":
-    from os import environ, mkdir
-    from os.path import isfile
-    from json import loads
-    #HSID, SSID, SID cookies required
-    if "HSID" in environ.keys() and "SSID" in environ.keys() and "SID" in environ.keys():
-        cookies = {"HSID": environ["HSID"], "SSID": environ["SSID"], "SID": environ["SID"]}
-    elif isfile("config.json"):
-        cookies = loads(open("config.json").read())
-    else:
-        print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
-        assert False
-    if not (cookies["HSID"] and cookies["SSID"] and cookies["SID"]):
-        print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
-        assert False
-
-    mysession = requests.session()
-    mysession.headers.update({"cookie": "HSID="+cookies["HSID"]+"; SSID="+cookies["SSID"]+"; SID="+cookies["SID"], "Accept-Language": "en-US",})
-    del cookies
-    from sys import argv
-    from queue import Queue
-    from threading import Thread
-    langs = ['ab', 'aa', 'af', 'sq', 'ase', 'am', 'ar', 'arc', 'hy', 'as', 'ay', 'az', 'bn', 'ba', 'eu', 'be', 'bh', 'bi', 'bs', 'br',
-    'bg', 'yue', 'yue-HK', 'ca', 'chr', 'zh-CN', 'zh-HK', 'zh-Hans', 'zh-SG', 'zh-TW', 'zh-Hant', 'cho', 'co', 'hr', 'cs', 'da', 'nl',
-    'nl-BE', 'nl-NL', 'dz', 'en', 'en-CA', 'en-IN', 'en-IE', 'en-GB', 'en-US', 'eo', 'et', 'fo', 'fj', 'fil', 'fi', 'fr', 'fr-BE',
-    'fr-CA', 'fr-FR', 'fr-CH', 'ff', 'gl', 'ka', 'de', 'de-AT', 'de-DE', 'de-CH', 'el', 'kl', 'gn', 'gu', 'ht', 'hak', 'hak-TW', 'ha',
-    'iw', 'hi', 'hi-Latn', 'ho', 'hu', 'is', 'ig', 'id', 'ia', 'ie', 'iu', 'ik', 'ga', 'it', 'ja', 'jv', 'kn', 'ks', 'kk', 'km', 'rw',
-    'tlh', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mni', 'mi', 'mr', 'mas', 'nan',
-    'nan-TW', 'lus', 'mo', 'mn', 'my', 'na', 'nv', 'ne', 'no', 'oc', 'or', 'om', 'ps', 'fa', 'fa-AF', 'fa-IR', 'pl', 'pt', 'pt-BR',
-    'pt-PT', 'pa', 'qu', 'ro', 'rm', 'rn', 'ru', 'ru-Latn', 'sm', 'sg', 'sa', 'sc', 'gd', 'sr', 'sr-Cyrl', 'sr-Latn', 'sh', 'sdp', 'sn',
-    'scn', 'sd', 'si', 'sk', 'sl', 'so', 'st', 'es', 'es-419', 'es-MX', 'es-ES', 'es-US', 'su', 'sw', 'ss', 'sv', 'tl', 'tg', 'ta',
-    'tt', 'te', 'th', 'bo', 'ti', 'tpi', 'to', 'ts', 'tn', 'tr', 'tk', 'tw', 'uk', 'ur', 'uz', 'vi', 'vo', 'vor', 'cy', 'fy', 'wo',
-    'xh', 'yi', 'yo', 'zu']
-    vidl = argv
-    vidl.pop(0)
-
-    try:
-        mkdir("out")
-    except:
-        pass
-
-    jobs = Queue()
-    for video in vidl:
-        try:
-            mkdir("out/"+video.strip())
-        except:
-            pass
-        for lang in langs:
-            jobs.put((lang, video, "default"))
-
-    subthreads = []
-
-    for r in range(50):
-        subrunthread = Thread(target=subprrun, args=(jobs,mysession))
-        subrunthread.start()
-        subthreads.append(subrunthread)
-        del subrunthread
-
-    for xa in subthreads:
-        xa.join() #bug (occurred once: the script ended before the last thread finished)
-        subthreads.remove(xa)
-        del xa
+def subprrun(jobs, mysession, langcode, vid, mode):
+    collect() #cleanup memory
+    vid = vid.strip()
+    print(langcode, vid)
+
+    while True:
+        if mode == "default":
+            pparams = (
+                ("v", vid),
+                ("lang", langcode),
+                ("action_mde_edit_form", 1),
+                ("bl", "vmp"),
+                ("ui", "hd"),
+                ("tab", "captions"),
+                ("o", "U")
+            )
+
+            page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
+        elif mode == "forceedit-metadata":
+            pparams = (
+                ("v", vid),
+                ("lang", langcode),
+                ("action_mde_edit_form", 1),
+                ('forceedit', 'metadata'),
+                ('tab', 'metadata')
+            )
+
+            page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
+        elif mode == "forceedit-captions":
+            pparams = (
+                ("v", vid),
+                ("lang", langcode),
+                ("action_mde_edit_form", 1),
+                ("bl", "vmp"),
+                ("ui", "hd"),
+                ('forceedit', 'captions'),
+                ("tab", "captions"),
+                ("o", "U")
+            )
+
+            page = mysession.get("https://www.youtube.com/timedtext_editor", params=pparams)
+
+        if not "accounts.google.com" in page.url:
+            break
+        else:
+            print("[Retrying in 30 seconds] Please supply authentication cookie information in config.json or environment variables. See README.md for more information.")
+            sleep(30)
+
+    inttext = page.text
+
+    try:
+        initlang = page.text.split("'metadataLanguage': \"", 1)[1].split('"', 1)[0]
+    except:
+        initlang = ""
+
+    del page
+
+    filestring = "_community_draft"
+    if '<li id="captions-editor-nav-captions" role="tab" data-state="published" class="published">' in inttext:
+        filestring = "_community_published"
+
+    if mode == "forceedit-captions":
+        filestring = "_community_draft"
+
+    if 'title="The video owner already provided subtitles/CC"' in inttext:
+        filestring = "_uploader_provided"
+
+    if not "forceedit" in mode:
+        if '&amp;forceedit=metadata&amp;tab=metadata">See latest</a>' in inttext:
+            jobs.put(("subtitles-forceedit-metadata", vid, langcode))
+
+        if '<li id="captions-editor-nav-captions" role="tab" data-state="published" class="published">' in inttext:
+            jobs.put(("subtitles-forceedit-captions", vid, langcode))
+
+    if 'id="reject-captions-button"' in inttext or 'id="reject-metadata-button"' in inttext or 'data-state="published"' in inttext or 'title="The video owner already provided subtitles/CC"' in inttext: #quick way of checking if this page is worth parsing
+        parser = MyHTMLParser()
+        parser.feed(inttext)
+
+        captiontext = False
+        for item in parser.captions:
+            if item["text"][:-9]:
+                captiontext = True
+
+        if captiontext and (mode == "default" or mode == "forceedit-captions"):
+            myfs = open("out/"+vid+"/"+vid+"_"+langcode+filestring+".sbv", "w", encoding="utf-8")
+            captions = parser.captions
+            captions.pop(0) #get rid of the fake one
+            while captions:
+                item = captions.pop(0)
+
+                myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=item["startTime"])) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=item["endTime"])) + "\n" + item["text"][:-9] + "\n")
+                del item
+                if captions:
+                    myfs.write("\n")
+            del captions
+            myfs.close()
+            del myfs
+
+        del captiontext
+
+        if (parser.title or parser.description[:-16]) and (mode == "default" or mode == "forceedit-metadata"):
+            metadata = {}
+            metadata["title"] = parser.title
+            if metadata["title"] == False:
+                metadata["title"] = ""
+            metadata["description"] = parser.description[:-16]
+
+            filestring = "_community_draft"
+            if '<li id="captions-editor-nav-metadata" role="tab" data-state="published" class="published">' in inttext:
+                filestring = "_community_published"
+
+            if mode == "forceedit-metadata":
+                filestring = "_community_draft"
+            open("out/"+vid+"/"+vid+"_"+langcode+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
+            del metadata
+
+        if (parser.inittitle[9:-17] or parser.initdescription) and (mode == "default" or mode == "forceedit-metadata" and initlang):
+            metadata = {}
+            metadata["title"] = parser.inittitle[9:-17]
+            if metadata["title"] == False:
+                metadata["title"] = ""
+            metadata["description"] = parser.initdescription
+
+            filestring = "_uploader_provided"
+            open("out/"+vid+"/"+vid+"_"+initlang+filestring+".json", "w", encoding="utf-8").write(dumps(metadata))
+            del metadata
+
+    del inttext
+
+    del langcode
+    del vid
+    del pparams
+
+    return True
+
+# if __name__ == "__main__":
+#     from os import environ, mkdir
+#     from os.path import isfile
+#     from json import loads
+#     #HSID, SSID, SID cookies required
+#     if "HSID" in environ.keys() and "SSID" in environ.keys() and "SID" in environ.keys():
+#         cookies = {"HSID": environ["HSID"], "SSID": environ["SSID"], "SID": environ["SID"]}
+#     elif isfile("config.json"):
+#         cookies = loads(open("config.json").read())
+#     else:
+#         print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
+#         assert False
+#     if not (cookies["HSID"] and cookies["SSID"] and cookies["SID"]):
+#         print("HSID, SSID, and SID cookies from youtube.com are required. Specify in config.json or as environment variables.")
+#         assert False
+
+#     mysession = requests.session()
+#     mysession.headers.update({"cookie": "HSID="+cookies["HSID"]+"; SSID="+cookies["SSID"]+"; SID="+cookies["SID"], "Accept-Language": "en-US",})
+#     del cookies
+#     from sys import argv
+#     from queue import Queue
+#     from threading import Thread
+#     langs = ['ab', 'aa', 'af', 'sq', 'ase', 'am', 'ar', 'arc', 'hy', 'as', 'ay', 'az', 'bn', 'ba', 'eu', 'be', 'bh', 'bi', 'bs', 'br',
+#     'bg', 'yue', 'yue-HK', 'ca', 'chr', 'zh-CN', 'zh-HK', 'zh-Hans', 'zh-SG', 'zh-TW', 'zh-Hant', 'cho', 'co', 'hr', 'cs', 'da', 'nl',
+#     'nl-BE', 'nl-NL', 'dz', 'en', 'en-CA', 'en-IN', 'en-IE', 'en-GB', 'en-US', 'eo', 'et', 'fo', 'fj', 'fil', 'fi', 'fr', 'fr-BE',
+#     'fr-CA', 'fr-FR', 'fr-CH', 'ff', 'gl', 'ka', 'de', 'de-AT', 'de-DE', 'de-CH', 'el', 'kl', 'gn', 'gu', 'ht', 'hak', 'hak-TW', 'ha',
+#     'iw', 'hi', 'hi-Latn', 'ho', 'hu', 'is', 'ig', 'id', 'ia', 'ie', 'iu', 'ik', 'ga', 'it', 'ja', 'jv', 'kn', 'ks', 'kk', 'km', 'rw',
+#     'tlh', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mni', 'mi', 'mr', 'mas', 'nan',
+#     'nan-TW', 'lus', 'mo', 'mn', 'my', 'na', 'nv', 'ne', 'no', 'oc', 'or', 'om', 'ps', 'fa', 'fa-AF', 'fa-IR', 'pl', 'pt', 'pt-BR',
+#     'pt-PT', 'pa', 'qu', 'ro', 'rm', 'rn', 'ru', 'ru-Latn', 'sm', 'sg', 'sa', 'sc', 'gd', 'sr', 'sr-Cyrl', 'sr-Latn', 'sh', 'sdp', 'sn',
+#     'scn', 'sd', 'si', 'sk', 'sl', 'so', 'st', 'es', 'es-419', 'es-MX', 'es-ES', 'es-US', 'su', 'sw', 'ss', 'sv', 'tl', 'tg', 'ta',
+#     'tt', 'te', 'th', 'bo', 'ti', 'tpi', 'to', 'ts', 'tn', 'tr', 'tk', 'tw', 'uk', 'ur', 'uz', 'vi', 'vo', 'vor', 'cy', 'fy', 'wo',
+#     'xh', 'yi', 'yo', 'zu']
+#     vidl = argv
+#     vidl.pop(0)
+
+#     try:
+#         mkdir("out")
+#     except:
+#         pass
+
+#     jobs = Queue()
+#     for video in vidl:
+#         try:
+#             mkdir("out/"+video.strip())
+#         except:
+#             pass
+#         for lang in langs:
+#             jobs.put((lang, video, "default"))
+
+#     subthreads = []
+
+#     for r in range(50):
+#         subrunthread = Thread(target=subprrun, args=(jobs,mysession))
+#         subrunthread.start()
+#         subthreads.append(subrunthread)
+#         del subrunthread
+
+#     for xa in subthreads:
+#         xa.join() #bug (occurred once: the script ended before the last thread finished)
+#         subthreads.remove(xa)
+#         del xa
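This commit turns subprrun from a queue-draining thread body into a per-task function: it handles exactly one (langcode, vid, mode) unit and returns, pushing any follow-up work back onto the caller's queue as worker-style task tuples. A minimal sketch of the new calling convention, assuming export.py is importable as a module; build_session and the literal values are illustrative, not part of the commit:

    # Sketch of driving the refactored subprrun; everything except the
    # subprrun signature itself is illustrative.
    from queue import Queue

    import requests

    from export import subprrun  # assumes export.py is on the import path

    def build_session(hsid, ssid, sid):
        # Mirrors the header shape the (now commented-out) __main__ block built.
        s = requests.session()
        s.headers.update({
            "cookie": "HSID=" + hsid + "; SSID=" + ssid + "; SID=" + sid,
            "Accept-Language": "en-US",
        })
        return s

    jobs = Queue()  # receives follow-up tasks such as
                    # ("subtitles-forceedit-metadata", vid, langcode)
    mysession = build_session("<HSID>", "<SSID>", "<SID>")  # placeholder cookies

    # One call now processes exactly one language/video/mode combination.
    subprrun(jobs, mysession, "en", "<video id>", "default")

Moving the loop out of subprrun lets a single generic worker thread interleave discovery, subtitles, and forceedit tasks from one queue, instead of dedicating 50 threads to caption export alone.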

worker.py (+31, -14)

@@ -87,6 +87,8 @@ class GracefulKiller:
 
 gkiller = GracefulKiller()
 
+#TODO: discoveries, zipping, completion of subtitles
+
 #minitasks
 def threadrunner(jobs: Queue):
     global langcnt
@@ -98,9 +100,35 @@ def threadrunner(jobs: Queue):
         if task == "submitdiscovery":
             tracker.add_item_to_tracker(args, vid)
         elif task == "discovery":
-            pass
+            while True:
+                try:
+                    info = getmetadata(mysession, str(vid).strip())
+                    break
+                except BaseException as e:
+                    print(e)
+                    print("Error in retrieving information, waiting 30 seconds")
+                    sleep(30)
+            if info[0] or info[1]: # ccenabled or creditdata
+                if not isdir("out/"+str(vid).strip()):
+                    mkdir("out/"+str(vid).strip())
+                if info[1]:
+                    open("out/"+str(vid).strip()+"/"+str(vid).strip()+"_published_credits.json", "w").write(dumps(info[1]))
+
+                if info[0]:
+                    langcnt[vid] = 0
+                    for langcode in langs:
+                        jobs.put(("subtitles", vid, langcode))
+                else:
+                    jobs.put(("complete", None, "video:"+vid))
         elif task == "subtitles":
-            pass
+            subprrun(jobs, mysession, args, vid, "default")
+            langcnt[vid] += 1
+            if langcnt[vid] >= 195:
+                pass #complete(?)
+        elif task == "subtitles-forceedit-captions":
+            subprrun(jobs, mysession, args, vid, "forceedit-captions")
+        elif task == "subtitles-forceedit-metadata":
+            subprrun(jobs, mysession, args, vid, "forceedit-metadata")
         elif task == "channel":
             y = ydl.extract_info("https://www.youtube.com/channel/"+desit.split(":", 1)[1], download=False)
             for itemyv in y["entries"]:
@@ -185,14 +213,7 @@ def prrun():
     item = jobs.get()
 
     print("Video ID:", str(item).strip())
-    while True:
-        try:
-            info = getmetadata(mysession, str(item).strip())
-            break
-        except BaseException as e:
-            print(e)
-            print("Error in retrieving information, waiting 30 seconds")
-            sleep(30)
 
     # Add any discovered videos
     recvids.update(info[2])
@@ -200,10 +221,6 @@ def prrun():
     recmixes.update(info[4])
     recplayl.update(info[5])
 
-    if info[0] or info[1]: # ccenabled or creditdata
-        if not isdir("out/"+str(item).strip()):
-            mkdir("out/"+str(item).strip())
-
-        if info[1]: # creditdata
-            open("out/"+str(item).strip()+"/"+str(item).strip()+"_published_credits.json", "w").write(dumps(info[1]))
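In worker.py, the "discovery" task now fetches video metadata (retrying on errors), saves published credits, and fans each caption-enabled video out into one "subtitles" task per language, with langcnt counting finished languages per video; the langcnt[vid] >= 195 threshold matches the 195-entry langs list in export.py, and per-video completion itself is still a TODO (pass #complete(?)). A toy, single-threaded model of that fan-out/count pattern; handle(), the three-entry language list, and the completion print are stand-ins, not project code:

    # Toy model of threadrunner's fan-out/count pattern; not project code.
    from queue import Queue

    langs = ["en", "es", "de"]  # stand-in; the real list has 195 entries
    jobs = Queue()
    langcnt = {}  # per-video count of completed language tasks

    def handle(task, vid, args):
        if task == "discovery":
            langcnt[vid] = 0
            for langcode in langs:
                jobs.put(("subtitles", vid, langcode))  # one task per language
        elif task == "subtitles":
            # the real worker calls subprrun(jobs, mysession, args, vid, "default")
            langcnt[vid] += 1
            if langcnt[vid] >= len(langs):  # worker.py checks >= 195 here
                print("video", vid, "complete")  # stand-in for the TODO

    jobs.put(("discovery", "someVideoId", None))
    while not jobs.empty():
        task, vid, args = jobs.get()
        handle(task, vid, args)

Because every language's "subtitles" task is an independent queue item, any worker thread can pick one up, and the counter rather than thread lifetime decides when a video is done.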