KaraKaraWitch committed
Commit: 80462dc
Parent(s): 840b82f
Upload Script
Files changed: sub2txt.py (+140, -0)
sub2txt.py
ADDED
@@ -0,0 +1,140 @@
from copy import deepcopy
import pathlib
import re

import orjson
import srt
import typer

app = typer.Typer()

# Regex fragments used by split_into_sentences() below.
alphabets = "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov|edu|me)"
digits = "([0-9])"
multiple_dots = r"\.{2,}"


def split_into_sentences(text: str) -> list[str]:
    """
    Split the text into sentences.

    If the text contains the substrings "<prd>" or "<stop>", splitting will be
    incorrect because those strings are used internally as markers.

    :param text: text to be split into sentences
    :type text: str

    :return: list of sentences
    :rtype: list[str]
    """
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Protect periods that do not end a sentence by rewriting them to <prd>.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    text = re.sub(digits + "[.]" + digits, "\\1<prd>\\2", text)
    text = re.sub(
        multiple_dots, lambda match: "<prd>" * len(match.group(0)) + "<stop>", text
    )
    if "Ph.D" in text:
        text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub(r"\s" + alphabets + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(
        alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]",
        "\\1<prd>\\2<prd>\\3<prd>",
        text,
    )
    text = re.sub(alphabets + "[.]" + alphabets + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + alphabets + "[.]", " \\1<prd>", text)
    # Move sentence-ending punctuation outside closing quotes so it is seen below.
    if "”" in text:
        text = text.replace(".”", "”.")
    if '"' in text:
        text = text.replace('."', '".')
    if "!" in text:
        text = text.replace('!"', '"!')
    if "?" in text:
        text = text.replace('?"', '"?')
    # Mark real sentence boundaries, then restore the protected periods.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = [s.strip() for s in sentences]
    if sentences and not sentences[-1]:
        sentences = sentences[:-1]
    return sentences

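# Illustrative example, not part of the original upload: titles such as "Dr."
# are protected via the temporary <prd> marker, so they do not end a sentence.
#
#   >>> split_into_sentences("Dr. Smith went home. She was tired!")
#   ['Dr. Smith went home.', 'She was tired!']
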
@app.command()
def srt_folder(folder: pathlib.Path, output_file: pathlib.Path):
    """Convert every .srt file under `folder` into one JSON line in `output_file`."""
    with open(output_file, "wb") as f:
        for file in folder.rglob("*.srt"):
            # The tag list is parsed from the filename stem, which is expected to
            # look like "<prefix>-<tag; tag; ...> (576p|1080p ...)_<suffix>".
            if "(576p" in file.stem:
                things_string = "_".join(file.stem.split("_")[:-1]).split("-")[1].split("(576p")[0]
            elif "(1080p" in file.stem:
                things_string = "_".join(file.stem.split("_")[:-1]).split("-")[1].split("(1080p")[0]
            else:
                # Skip files without a recognised resolution tag instead of
                # crashing on an undefined things_string.
                print(file.stem, "missing resolution tag; skipping")
                continue
            things = [i.strip() for i in things_string.split(";")]
            dict_content = srt_file(file, None, as_dict=True)
            dict_content["meta"]["things"] = things
            # For the folder output, "text" carries the sentence list instead of
            # the joined string.
            del dict_content["text"]
            dict_content["text"] = dict_content["meta"]["list_sentences"]
            del dict_content["meta"]["list_sentences"]
            f.write(orjson.dumps(dict_content) + b"\n")


@app.command()
def srt_file(file: pathlib.Path, output_file: pathlib.Path, as_dict: bool = False):
    """Convert a single .srt file into a JSON document (or return it as a dict)."""
    data = file.read_text(encoding="utf-8")
    sub_lines = list(srt.parse(data))
    raw_content = ""
    for sub in sub_lines:
        sub_content = sub.content.lower()
        # Drop captioning credits.
        if "captions by" in sub_content:
            continue
        if "captions paid for" in sub_content:
            continue
        # Strip speaker markers such as "narrator:", ">> narrator:" and ">>".
        if sub_content.startswith("narrator:"):
            sub_content = sub_content.split("narrator:")[1].strip()
        if sub_content.startswith(">> narrator:"):
            sub_content = sub_content.split(">> narrator:")[1].strip()
        if sub_content.startswith(">>"):
            sub_content = sub_content[2:].strip()
        raw_content += sub_content.replace("\\N", " ").replace("  ", " ") + " "
    raw_content = raw_content.replace(" --", "-- ").replace("♪", "").replace("  ", " ")
    sents = split_into_sentences(raw_content)
    # Re-capitalize, skipping any empty strings the splitter may produce.
    sents = [s[0].upper() + s[1:] for s in sents if s]
    z = len(sents)
    # Append the cleaned sentences to the end of the list, then drop the first
    # z (unfiltered) entries.
    for sent in deepcopy(sents):
        # Remove bracketed sound cues such as "[ music ]".
        sent = re.sub(r"\[ .*? \]", "", sent).strip()
        if not sent.strip("."):
            continue
        # Drop viewer-feedback boilerplate.
        if "have any comments about the show" in sent:
            continue
        if "have any comments," in sent:
            continue
        if "have any questions about the show" in sent:
            continue
        if "drop us a line at" in sent:
            continue
        sents.append(sent)
    sents = sents[z:]
    # print(sents)
    if as_dict:
        return {"text": " ".join(sents), "meta": {"list_sentences": sents}}
    output_file.write_bytes(
        orjson.dumps({"text": " ".join(sents), "meta": {"list_sentences": sents}})
    )


if __name__ == "__main__":
    app()
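Typer exposes the two commands with dashes in place of underscores, so the script can be run roughly as follows (file and folder names here are illustrative, assuming typer, srt, and orjson are installed):

    python sub2txt.py srt-file episode01.srt episode01.json
    python sub2txt.py srt-folder ./subtitles corpus.jsonl

srt-file writes a single JSON object for one subtitle file; srt-folder writes one JSON object per .srt file found under the folder, in JSON Lines form.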