Browse Source

Further work on html.parser

pull/3/head
tech234a 3 years ago
parent
commit
6177c02467
2 changed files with 53 additions and 49 deletions
  1. +50
    -48
      export.py
  2. +3
    -1
      worker.py

+ 50
- 48
export.py View File

@@ -25,8 +25,6 @@ def timedelta_to_sbv_timestamp(timedelta_timestamp):
return "%1d:%02d:%02d.%03d" % (hrs, mins, secs, msecs)


from bs4 import BeautifulSoup
import html.parser
from datetime import timedelta

from json import dumps
@@ -38,7 +36,12 @@ from html.parser import HTMLParser

class MyHTMLParser(HTMLParser):
def __init__(self):
    # Initialize the stdlib HTMLParser base class, then set up the state this
    # parser accumulates while scanning a YouTube caption-editor page.
    # NOTE(review): indentation below reflects the diff capture, which flattened
    # leading whitespace; in the real file these lines are indented under the def.
HTMLParser.__init__(self)
self.captions = []  # list of {"startTime": int, "endTime": int, "text": str} dicts, appended by handle_starttag
self.captiontext = True  # NOTE(review): only ever set to True again (in handle_data) and checked downstream to gate .sbv output — initializing to True makes the flag always-true; presumably this was meant to start as False. TODO confirm
self.title = ""  # filled from the <input id="metadata-title"> value attribute
self.description = ""  # accumulated from <textarea id="metadata-description"> character data


def check_attr(self, attrs, attr, value):
for item in attrs:
@@ -53,22 +56,36 @@ class MyHTMLParser(HTMLParser):
return False

def handle_starttag(self, tag, attrs):
    # NOTE(review): this span is a diff hunk whose '+'/'-' markers were lost in
    # capture. The next six code lines appear to be the PRE-image (removed)
    # body — the "endTime" line has a stray '})' and computes
    # len(self.captions-1) instead of len(self.captions)-1, both errors the
    # replacement lines further down correct. Do not treat both bodies as one
    # function.
if tag == "input" and self.check_attr(attrs, "class", "yt-uix-form-input-text event-time-field event-start-time"):
self.captions.append({"startTime": self.get_attr("data-start-ms")})
elif tag == "input" and self.check_attr(attrs, "class", "yt-uix-form-input-text event-time-field event-end-time"):
self.captions[len(self.captions-1)]["endTime"] = self.get_attr("data-end-ms")})
elif tag == "textarea" and self.check_attr(attrs, "class", "yt-uix-form-input-textarea event-text goog-textarea"):
pass #do this

#def handle_endtag(self, tag):
#    print("Encountered an end tag :", tag)
    # NOTE(review): POST-image (added) body starts here. It records caption
    # start/end times (as ints) from the editor's time-field <input> tags,
    # skipping template rows whose data-segment-id attribute is empty, and
    # captures the video title from <input id="metadata-title">.
if tag == "input" and self.check_attr(attrs, "class", "yt-uix-form-input-text event-time-field event-start-time") and not ' data-segment-id="" ' in self.get_starttag_text():
self.captions.append({"startTime": int(self.get_attr(attrs, "data-start-ms")), "text": ""})
elif tag == "input" and self.check_attr(attrs, "class", "yt-uix-form-input-text event-time-field event-end-time") and not ' data-segment-id="" ' in self.get_starttag_text():
self.captions[len(self.captions)-1]["endTime"] = int(self.get_attr(attrs, "data-end-ms"))
# elif tag == "textarea" and self.check_attr(attrs, "class", "yt-uix-form-input-textarea event-text goog-textarea"):
# if len(self.captions):
# self.datatarget = len(self.captions)-1
# else:
# self.datatarget = 0
elif tag == "input" and self.check_attr(attrs, "id", "metadata-title"):
self.title = self.get_attr(attrs, "value")
# elif tag == "textarea" and self.check_attr(attrs, "id", "metadata-description"):
# self.datatarget = "description"

# def handle_endtag(self, tag):
# if tag == "textarea":
# self.datatarget = None

def handle_data(self, data):
    # NOTE(review): diff hunk with '+'/'-' markers lost in capture — the print
    # line immediately below appears to be the PRE-image (removed) debug body;
    # the lines after it are the POST-image (added) body.
print("Encountered some data :", data)
    # Post-image body: route character data by the tag that opened it.
    # Caption text (<textarea name="serve_text"> from a real, non-template
    # segment) is appended to the most recent caption dict; the metadata
    # description textarea's text is accumulated separately.
if self.get_starttag_text() and self.get_starttag_text().startswith("<textarea "):
if 'name="serve_text"' in self.get_starttag_text() and not 'data-segment-id=""' in self.get_starttag_text():
self.captions[len(self.captions)-1]["text"] += data
self.captiontext = True  # NOTE(review): flag is never reset to False, and __init__ starts it as True — looks ineffective as written; verify intent
elif 'id="metadata-description"' in self.get_starttag_text():
self.description += data

def subprrun(jobs, headers):
while not jobs.empty():
langcode, vid = jobs.get()
vid = vid.strip()
print(langcode, vid)
pparams = (
("v", vid),
@@ -86,46 +103,31 @@ def subprrun(jobs, headers):

parser = MyHTMLParser()
parser.feed(page.text)
#soup = BeautifulSoup(page.text, features="html5lib")
#del page

divs = soup.find_all("div", class_="timed-event-line")

myfs = open("out/"+vid+"/"+vid+"_"+langcode+".sbv", "w", encoding="utf-8")
while divs:
item = divs.pop(0)
text = item.find("textarea").text
startms = int(item.find("input", class_="event-start-time")["data-start-ms"])
endms = int(item.find("input", class_="event-end-time")["data-end-ms"])

myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=startms)) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=endms)) + "\n" + text + "\n")
#text.decompose()
item.decompose()
del item
del text
del startms
del endms
if divs:
myfs.write("\n")
del divs
myfs.close()
del myfs

if soup.find("li", id="captions-editor-nav-metadata")["data-state"] != "locked":
del page

if parser.captiontext:
myfs = open("out/"+vid+"/"+vid+"_"+langcode+".sbv", "w", encoding="utf-8")
captions = parser.captions
captions.pop(0) #get rid of the fake one
while captions:
item = captions.pop(0)

myfs.write(timedelta_to_sbv_timestamp(timedelta(milliseconds=item["startTime"])) + "," + timedelta_to_sbv_timestamp(timedelta(milliseconds=item["endTime"])) + "\n" + item["text"][:-9] + "\n")
del item
if captions:
myfs.write("\n")
del captions
myfs.close()
del myfs

if parser.title or parser.description:
metadata = {}

try:
metadata["title"] = soup.find("input", id="metadata-title")["value"]
except KeyError:
metadata["title"] = ""
metadata["description"] = soup.find("textarea", id="metadata-description").text

metadata["title"] = parser.title
metadata["description"] = parser.description[:-16]
open("out/"+vid+"/"+vid+"_"+langcode+".json", "w", encoding="utf-8").write(dumps(metadata))
del metadata

soup.decompose()
del soup
del langcode
del vid
del pparams


+ 3
- 1
worker.py View File

@@ -168,9 +168,11 @@ while True:
del subrunthread

for xa in subthreads:
xa.join()
xa.join() #bug (occurred once: the script ended before the last thread finished)
subthreads.remove(xa)
del xa

sleep(1)
# while True:
# gsres = False
# try:


Loading…
Cancel
Save