Rename and reorganize
chall.py (CHANGED)
Old version (removed lines are marked with "-"; removed lines that are cut off in the diff view appear truncated):

@@ -8,6 +8,8 @@ import soundfile as sf
 import importlib.util


 _DESCRIPTION = "tbd"
 _CITATION = "tbd"

@@ -130,7 +132,7 @@ class Chall(GeneratorBasedBuilder):
                         "text": Value("string"),
                     }
                 ),
-                "audio": Audio(sampling_rate=
             })
         else:
             features = Features({
@@ -168,7 +170,7 @@ class Chall(GeneratorBasedBuilder):
                     ),
                 }
             ),
-            "audio": Audio(sampling_rate=
         })

         return DatasetInfo(
@@ -214,26 +216,58 @@ class Chall(GeneratorBasedBuilder):
             # ),
         ]

     @staticmethod
-    def
         """

-        :param audio_id:
-        :param audio_file_path:
-        :param data:
-        :param transcript:
-        :return:
         """

-    def
         """
         Generates examples from audio segments based on the transcript provided. Each segment is processed to produce
         an utterance which includes the audio slice and metadata.
@@ -242,56 +276,58 @@ class Chall(GeneratorBasedBuilder):
         :param audio_file_path: The filesystem path to the audio file.
         :param data: A dictionary containing the segments to be processed
         :param transcript: A dictionary containing transcript details with segments of spoken words.
-        :return: Yields a tuple
         """

         segments = transcript.get("segments", [])

-        segments = self._remove_to_long_pauses(segments)
-
-        if self.config.max_chunk_length is not None:
-            segments = self._split_long_utterances(segments)
-
-        if self.config.remove_trailing_pauses:
-            self._remove_trailing_pauses_in_segments(segments)
-
-        if self.config.max_pause_length is not None:
-            segments = self._filter_utterances_by_duration(segments, self.config.min_chunk_length, self.config.max_chunk_length)

-        start_time = segment["words"][0]["start"]
-        end_time = segment["words"][-1]["end"]

     @staticmethod
-    def
         """
         Removes pauses at the end/start of utterances in each segment to eliminate pauses between segments.

@@ -310,10 +346,11 @@ class Chall(GeneratorBasedBuilder):
                 # Remove segment if no words left
                 if not segment["words"]:
                     segments.remove(segment)

-    def
         """
-        Remove

         Example (assuming (...) is longer than max_pause_length):
         [["Hello", "(...)", "World!"]] --> [["Hello"], ["World!"]]
@@ -339,7 +376,7 @@ class Chall(GeneratorBasedBuilder):
         return split_segments

     @staticmethod
-    def
         """
         Removes segments with invalid duration
         :param min_duration: The minimum duration allowed for a segment.
@@ -357,7 +394,7 @@ class Chall(GeneratorBasedBuilder):

         return filtered_segments

-    def
         """
         Splits segments into smaller chunks if their duration exceeds the maximum chunk length specified in the config.

@@ -406,29 +443,4 @@ class Chall(GeneratorBasedBuilder):

         return list_of_chunks

-    def _generate_examples(self, filepath, metafile):
-        """
-        This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        :param filepath: The path where the data is located.
-        :param metafile: The metafile describing the chall data
-        :return:
-        """
-
-        logger.info("generating examples from = %s", filepath)
-
-        with open(metafile, 'r') as file:
-            for line in file:
-                data = json.loads(line)
-
-                # load transcript
-                transcript_file = os.path.join(filepath, data["transcript_file"])
-                with open(transcript_file, 'r') as transcript:
-                    transcript = json.load(transcript)
-
-                audio_id = data['audio_id']
-                audio_file_path = os.path.join(filepath, data["audio_file"])

-                if self.config.split_segments:
-                    yield from self._generate_examples_utterances(audio_id, str(audio_file_path), data, transcript)
-                else:
-                    yield from self._generate_examples_segments(audio_id, str(audio_file_path), data, transcript)
New version (added lines are marked with "+"):

@@ -8,6 +8,8 @@ import soundfile as sf
 import importlib.util


+_SAMPLE_RATE = 16000
+
 _DESCRIPTION = "tbd"
 _CITATION = "tbd"

@@ -130,7 +132,7 @@ class Chall(GeneratorBasedBuilder):
                         "text": Value("string"),
                     }
                 ),
+                "audio": Audio(sampling_rate=_SAMPLE_RATE)
             })
         else:
             features = Features({
@@ -168,7 +170,7 @@ class Chall(GeneratorBasedBuilder):
                     ),
                 }
             ),
+            "audio": Audio(sampling_rate=_SAMPLE_RATE)
         })

         return DatasetInfo(
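Fixing the sampling rate on the Audio feature tells the datasets library what rate decoded audio should have: when the "audio" column of an example is accessed, the array is decoded and, if necessary, resampled to 16 kHz. A reduced sketch of such a feature declaration (the field set here is illustrative, not the full schema used in chall.py):

from datasets import Audio, Features, Value

features = Features({
    "audio_id": Value("string"),          # illustrative subset of fields
    "text": Value("string"),
    "audio": Audio(sampling_rate=16000),  # decoded arrays are returned at 16 kHz
})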
@@ -214,26 +216,58 @@ class Chall(GeneratorBasedBuilder):
             # ),
         ]

+    def _generate_examples(self, filepath, metafile):
+        """
+        This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        :param filepath: The path where the data is located.
+        :param metafile: The metafile describing the chall data
+        :return:
+        """
+
+        logger.info("generating examples from = %s", filepath)
+
+        with open(metafile, 'r') as file:
+            for line in file:
+                data = json.loads(line)
+
+                # load transcript
+                transcript_file = os.path.join(filepath, data["transcript_file"])
+                with open(transcript_file, 'r') as transcript:
+                    transcript = json.load(transcript)
+
+                audio_id = data['audio_id']
+                audio_file_path = os.path.join(filepath, data["audio_file"])
+
+                if self.config.split_segments:
+                    yield from self._generate_utterance_examples(audio_id, str(audio_file_path), data, transcript)
+                else:
+                    yield from self._generate_transcript_examples(audio_id, str(audio_file_path), data, transcript)
+
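Each metafile line is parsed as one JSON object describing a single recording. A hypothetical line, restricted to the keys that _generate_examples actually reads (paths and values are invented for illustration):

import json

line = '{"audio_id": "rec_0001", "audio_file": "audio/rec_0001.wav", "transcript_file": "transcripts/rec_0001.json"}'
data = json.loads(line)
# "transcript_file" and "audio_file" are joined onto the data directory with os.path.join,
# while "audio_id" becomes (part of) the example key.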
     @staticmethod
+    def _generate_transcript_examples(audio_id: str, audio_file_path: str, data: dict, transcript: dict):
         """
+        Generates examples based on the entire audio file and its associated transcript metadata. This method reads the
+        entire audio file, extracts speaker and segment information from the transcript, and packages these along with
+        the audio data into a dictionary that is then yielded.

+        :param audio_id: A unique identifier for the audio file.
+        :param audio_file_path: The file system path to the audio file.
+        :param data: A dictionary of the metadata.
+        :param transcript: A dictionary containing details of the transcript, including speakers and segments.
+        :return: Yields a tuple containing the audio ID and the enriched transcript dictionary.
         """

+        transcript_data = data.copy()  # Create a fresh copy of data to ensure no side effects
+        transcript_data["speakers"] = transcript.get("speakers", [])
+        transcript_data["segments"] = transcript.get("segments", [])

+        with sf.SoundFile(audio_file_path) as audio_file:
+            audio = audio_file.read(dtype='float32')

+        transcript_data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": _SAMPLE_RATE}
+        yield audio_id, transcript_data

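The keys accessed above and in _generate_utterance_examples below imply a transcript layout roughly like the following; only "speakers", "segments", the per-segment "speaker" and "words", and the per-word "start"/"end" come from the code, everything else (including the key holding the word text) is assumed:

transcript = {
    "speakers": ["A", "B"],   # the shape of a speaker entry is not visible in the diff
    "segments": [
        {
            "speaker": "A",
            "words": [
                {"word": "Hello", "start": 0.32, "end": 0.61},   # "word" key is an assumption
                {"word": "World", "start": 0.70, "end": 1.05},
            ],
        },
    ],
}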
+    def _generate_utterance_examples(self, audio_id: str, audio_file_path: str, data: dict, transcript: dict):
         """
         Generates examples from audio segments based on the transcript provided. Each segment is processed to produce
         an utterance which includes the audio slice and metadata.

         :param audio_file_path: The filesystem path to the audio file.
         :param data: A dictionary containing the segments to be processed
         :param transcript: A dictionary containing transcript details with segments of spoken words.
+        :return: Yields a tuple containing the audio ID and the enriched utterance dictionary.
         """

         segments = transcript.get("segments", [])
+        segments = self._process_segments(segments)

+        with sf.SoundFile(audio_file_path) as track:

+            if not track.seekable():
+                raise ValueError("Audio file is not seekable.")

+            for segment_i, segment in enumerate(segments):
+                segment_data = data.copy()  # Create a fresh copy of data for each segment
+                segment_id = f"{audio_id}_{str(segment_i).rjust(3, '0')}"

+                segment_data["audio_id"] = segment_id
+                segment_data["speaker_id"] = segment["speaker"]
+                segment_data["words"] = segment["words"]

+                start_time = segment["words"][0]["start"]
+                end_time = segment["words"][-1]["end"]
+                start_frame = int(_SAMPLE_RATE * start_time)
+                frames_to_read = int(_SAMPLE_RATE * (end_time - start_time))

+                track.seek(start_frame)
+                audio = track.read(frames_to_read)
+                segment_data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": _SAMPLE_RATE}

+                yield segment_id, segment_data

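The word-level start and end times are seconds, so multiplying by the sample rate converts them into frame offsets for soundfile. A small worked sketch with invented times:

import soundfile as sf

_SAMPLE_RATE = 16000

# An utterance from 12.5 s to 15.25 s in a (hypothetical) 16 kHz file:
start_frame = int(_SAMPLE_RATE * 12.5)                # 200000 frames into the file
frames_to_read = int(_SAMPLE_RATE * (15.25 - 12.5))   # 44000 frames, i.e. 2.75 s of audio

with sf.SoundFile("rec_0001.wav") as track:           # path is illustrative
    track.seek(start_frame)                           # jump to the utterance start
    audio = track.read(frames_to_read)                # read exactly the utterance's samples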
+    def _process_segments(self, segments):
+        """
+        Processes the list of segments based on configured rules.
+        :param segments: A list of segment dictionaries
+        :return: A list of processed segment dictionaries after applying all the filtering and splitting rules.
+        """
+        if self.config.max_pause_length:
+            segments = self._split_and_remove_long_pauses(segments)

+        if self.config.max_chunk_length is not None:
+            segments = self._split_long_segments(segments)

+        if self.config.remove_trailing_pauses:
+            segments = self._remove_trailing_pauses(segments)

+        if self.config.max_pause_length is not None:
+            segments = self._filter_segments_by_duration(segments, self.config.min_chunk_length, self.config.max_chunk_length)

+        return segments

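All four steps are driven by attributes of the builder config. A hypothetical configuration object listing just the attributes that _process_segments and _generate_examples read (the class name, types and default values are assumptions; only the attribute names come from the code):

from dataclasses import dataclass
from typing import Optional

@dataclass
class ChallConfigSketch:                      # hypothetical stand-in for the real BuilderConfig
    split_segments: bool = True               # per-utterance examples instead of per-recording
    max_pause_length: Optional[float] = 1.0   # gates the pause splitting (and the duration filter above)
    max_chunk_length: Optional[float] = 30.0  # gates splitting of overly long segments
    min_chunk_length: Optional[float] = 1.0   # lower bound handed to the duration filter
    remove_trailing_pauses: bool = True       # gates trimming of leading/trailing pauses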
     @staticmethod
+    def _remove_trailing_pauses(segments: List[dict]) -> List[dict]:
         """
         Removes pauses at the end/start of utterances in each segment to eliminate pauses between segments.

@@ -310,10 +346,11 @@ class Chall(GeneratorBasedBuilder):
                 # Remove segment if no words left
                 if not segment["words"]:
                     segments.remove(segment)
+        return segments

+    def _split_and_remove_long_pauses(self, segments: List[dict]) -> List[dict]:
         """
+        Remove too long pauses in a segment by splitting the segment in two segments and removing the filled pause.

         Example (assuming (...) is longer than max_pause_length):
         [["Hello", "(...)", "World!"]] --> [["Hello"], ["World!"]]
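An illustration of that rule in plain Python, assuming a filled pause appears as a "(...)" token in the word list with its own start/end times (how pauses are really encoded is not visible in the diff; the helper name and the "word" key are invented):

PAUSE_TOKEN = "(...)"

def split_on_long_pauses(words, max_pause_length):
    """Split a word list at filled pauses longer than max_pause_length, dropping the pause itself."""
    parts, current = [], []
    for word in words:
        is_long_pause = word["word"] == PAUSE_TOKEN and (word["end"] - word["start"]) > max_pause_length
        if is_long_pause:
            if current:
                parts.append(current)   # close the part that ends at the pause
            current = []                # the pause itself is discarded
        else:
            current.append(word)
    if current:
        parts.append(current)
    return parts

words = [
    {"word": "Hello", "start": 0.0, "end": 0.4},
    {"word": "(...)", "start": 0.4, "end": 3.0},    # 2.6 s filled pause
    {"word": "World!", "start": 3.0, "end": 3.5},
]
# split_on_long_pauses(words, max_pause_length=1.0) -> [[Hello], [World!]], mirroring the docstring example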
@@ -339,7 +376,7 @@ class Chall(GeneratorBasedBuilder):
         return split_segments

     @staticmethod
+    def _filter_segments_by_duration(segments: List[dict], min_duration: float = None, max_duration: float = None, ):
         """
         Removes segments with invalid duration
         :param min_duration: The minimum duration allowed for a segment.
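The body of the duration filter lies outside the changed lines; a sketch of the kind of check it performs, assuming a segment's duration is the span from its first word's start to its last word's end (as computed elsewhere in this file):

def filter_segments_by_duration_sketch(segments, min_duration=None, max_duration=None):
    """Keep only segments whose duration lies within the configured bounds."""
    kept = []
    for segment in segments:
        duration = segment["words"][-1]["end"] - segment["words"][0]["start"]
        if min_duration is not None and duration < min_duration:
            continue
        if max_duration is not None and duration > max_duration:
            continue
        kept.append(segment)
    return kept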
@@ -357,7 +394,7 @@ class Chall(GeneratorBasedBuilder):

         return filtered_segments

+    def _split_long_segments(self, segments: List[dict]) -> List[dict]:
         """
         Splits segments into smaller chunks if their duration exceeds the maximum chunk length specified in the config.

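The splitting logic itself is unchanged and not part of the diff; one plausible shape for it is a greedy word-by-word chunking that starts a new chunk once the running duration would exceed the maximum (the real implementation may differ):

def split_long_segment_sketch(segment, max_chunk_length):
    """Greedily split one segment's words into chunks no longer than max_chunk_length seconds."""
    chunks, current = [], []
    for word in segment["words"]:
        if current and word["end"] - current[0]["start"] > max_chunk_length:
            chunks.append({"speaker": segment["speaker"], "words": current})
            current = []
        current.append(word)
    if current:
        chunks.append({"speaker": segment["speaker"], "words": current})
    return chunks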
@@ -406,29 +443,4 @@ class Chall(GeneratorBasedBuilder):

         return list_of_chunks

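Scripts like chall.py are consumed through datasets.load_dataset; a hypothetical call (the repository path, option values and split name are invented, and passing the options as keyword arguments assumes they are exposed on the builder's config):

from datasets import load_dataset

ds = load_dataset(
    "user/chall",                 # invented repository path
    split_segments=True,          # one example per utterance instead of per recording
    max_chunk_length=30.0,
    min_chunk_length=1.0,
    max_pause_length=1.0,
    remove_trailing_pauses=True,
    trust_remote_code=True,       # needed for script-based datasets in recent datasets releases
)
print(ds["train"][0]["audio"]["sampling_rate"])   # 16000, assuming a "train" split exists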