fix vp_nel

- data/slue-vp_nel_blind.zip +2 -2
- slue-phase-2.py +9 -8
data/slue-vp_nel_blind.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:62a1fd6d01a19a08042bd7e1ae8549c08db67001cecf0fd43c96d4da3154e1d7
+size 168568066
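The zip is tracked with Git LFS, so this commit only swaps the pointer file; the new object's SHA-256 and byte size above come from that pointer. A minimal sketch for checking a locally pulled copy against those values (the local path and the read loop are illustrative, not part of this repo):

import hashlib

# Values taken from the updated LFS pointer in this commit.
EXPECTED_OID = "62a1fd6d01a19a08042bd7e1ae8549c08db67001cecf0fd43c96d4da3154e1d7"
EXPECTED_SIZE = 168568066

def check_lfs_object(path="data/slue-vp_nel_blind.zip"):
    """Compare a downloaded file against the oid/size recorded in the pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

if __name__ == "__main__":
    print(check_lfs_object())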
slue-phase-2.py CHANGED
@@ -58,9 +58,9 @@ For questions from the other 4 datasets, their question texts, answer strings, a
 
 SLUE-SQA-5 also contains a subset of Spoken Wikipedia, including the audios placed in “document” directories and their transcripts (document_text and normalized_document_text column in .tsv files). Additionally, we provide the text-to-speech alignments (.txt files in “word2time” directories).These contents are licensed with the same Creative Commons (CC BY-SA 4.0) license as Spoken Wikipedia.
 =======================================================
-SLUE-
+SLUE-vp_nel Dataset
 
-SLUE-
+SLUE-vp_nel includes word-level time stamps for dev and test splits of the SLUE-voxpopuli corpus.
 For the dev split, the dataset also contains named entity annotations and corresponding time-stamps in a tsv format.
 =======================================================
 
@@ -106,6 +106,8 @@ def load_word2time(word2time_file):
 
 def parse_nel_time_spans(nel_timestamps):
     nel_timestamps = ast.literal_eval(nel_timestamps)
+    if nel_timestamps is None:
+        return []
     return [
         {
             "ne_label": ne,
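On the parse_nel_time_spans change: utterances without entity annotations presumably store the literal string "None" in the TSV cell, and ast.literal_eval("None") returns None, which the list comprehension that follows cannot iterate. A rough sketch of the guard's effect; the span tuple layout is hypothetical and only borrows the ne_label / start_char_idx names from the features declared further down:

import ast

def parse_spans(cell):
    """Toy stand-in for parse_nel_time_spans: literal-eval the TSV cell,
    returning [] when the cell holds the literal "None"."""
    spans = ast.literal_eval(cell)
    if spans is None:  # no named entities for this utterance
        return []
    # Hypothetical layout (ne_label, start_char_idx, ...); the real TSV columns
    # may carry more fields, e.g. time offsets.
    return [{"ne_label": ne, "start_char_idx": idx} for ne, idx, *rest in spans]

print(parse_spans("None"))                      # -> []
print(parse_spans("[('DATE', 10, 1.2, 1.8)]"))  # -> [{'ne_label': 'DATE', 'start_char_idx': 10}]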
@@ -157,7 +159,7 @@ class SLUE2(datasets.GeneratorBasedBuilder):
         ),
         SLUE2Config(
             name="vp_nel",
-            description="SLUE-
+            description="SLUE-vp_nel set with named entity labels and time-stamps.",
         ),
     ]
 
@@ -209,10 +211,9 @@ class SLUE2(datasets.GeneratorBasedBuilder):
         elif self.config.name == "vp_nel":
             features = {
                 "id": datasets.Value("string"),
-                "split": datasets.Value("string"),
                 "audio": datasets.Audio(sampling_rate=16_000),
                 "speaker_id": datasets.Value("string"),
-                "
+                "text": datasets.Value("string"),
                 "word_timestamps": datasets.Sequence(
                     {
                         "word": datasets.Value("string"),
@@ -220,7 +221,7 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                         "end_sec": datasets.Value("float64"),
                     }
                 ),
-                "
+                "ne_timestamps": datasets.Sequence(
                     {
                         "ne_label": datasets.Value("string"),
                         "start_char_idx": datasets.Value("int32"),
@@ -247,7 +248,6 @@ class SLUE2(datasets.GeneratorBasedBuilder):
 
         dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name])
         data_dir = os.path.join(dl_dir, config_name)
-        print(data_dir)
 
         splits = []
         if self.config.name in ["hvb", "sqa5"]:
@@ -352,7 +352,7 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     "word2time": load_word2time(word2time_file),
                     "answer_spans": parse_qa_answer_spans(row.get("answer_spans", "[]")),
                 }
-            elif self.config.name == "
+            elif self.config.name == "vp_nel":
                 split = "test" if "test" in filepath else "dev"
                 utt_id = row["id"]
                 word_alignments_fn = os.path.join(
@@ -360,6 +360,7 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                 )
                 audio_file = os.path.join(
                     data_dir,
+                    'audio',
                     split,
                     f"{utt_id}.ogg",
                 )
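The added 'audio' path component means the loader now expects the .ogg files one level deeper in the extracted archive; for instance (data_dir and the utterance id below are placeholders):

import os

data_dir = "/path/to/extracted/vp_nel"     # placeholder for the extracted archive
split, utt_id = "dev", "example_utt_001"   # placeholder utterance id

audio_file = os.path.join(data_dir, "audio", split, f"{utt_id}.ogg")
print(audio_file)  # /path/to/extracted/vp_nel/audio/dev/example_utt_001.ogg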
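With these fixes the vp_nel config should load end to end. A usage sketch, not verified against a specific datasets release (script-based loading may require an older release or trust_remote_code=True), with field access following the features declared above:

import datasets

# Load the vp_nel config from the local loading script in this repo.
ds = datasets.load_dataset("./slue-phase-2.py", "vp_nel")
print(ds)  # shows the actual split names and row counts

sample = next(iter(ds.values()))[0]
print(sample["id"], sample["speaker_id"])
print(sample["text"])
# A Sequence of dicts is exposed by the datasets library as a dict of lists.
print(sample["word_timestamps"]["word"][:5])
print(sample["ne_timestamps"]["ne_label"])  # annotated on dev; the test split is blind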