BrunoHays committed
Commit 383231b · 1 Parent(s): 4769095

Removed options that altered text or removed samples, added an option to merge utterances
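The new configuration names replace the old text-cleaning and overlap-filtering options: either every utterance becomes its own example ("single_samples"), or consecutive utterances are merged up to a target duration ("max=30s", "max=10s"; the number after "=" is parsed into max_duration in seconds). A minimal loading sketch, assuming the script is consumed as a Hugging Face dataset repository; the repository id below is a placeholder, not taken from this page:

import datasets

# "single_samples": each utterance is its own example (the new default config).
# "max=30s" / "max=10s": consecutive utterances are merged up to ~30 s / ~10 s;
# e.g. the name "max=30s" is parsed so that config.max_duration == 30.0.
eslo = datasets.load_dataset("path/to/ESLO", "max=30s")  # repo id is a placeholder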

Files changed (1)
1. ESLO.py +44 -30
ESLO.py CHANGED
@@ -236,28 +236,28 @@ class ESLOConfig(datasets.BuilderConfig):
         super(ESLOConfig, self).__init__(
             version=datasets.Version("2.11.0", ""), name=name, **kwargs
         )
-        if "no_overlap" in name:
-            self.overlap = False
+        self.single_samples = (name == "single_samples")
+        if not self.single_samples:
+            self.max_duration = float(name.split("=")[1][:-1])
         else:
-            self.overlap = True
-        if "no_hesitation" in name:
-            self.hesitation = False
-        else:
-            self.hesitation = True
+            self.max_duration = None
 
 
 class ESLO(datasets.GeneratorBasedBuilder):
     """ESLO dataset."""
 
     BUILDER_CONFIGS = [
-        ESLOConfig(name="no_overlap_no_hesitation", description="ESLO dataset, removed hesitations from samples"
-                                                                " and all samples with overlap"),
-        ESLOConfig(name="no_hesitation", description="ESLO dataset, removed hesitations from samples"),
-        ESLOConfig(name="no_overlap", description="ESLO dataset, removed all samples with overlap"),
-        ESLOConfig(name="raw", description="ESLO dataset"),
+        ESLOConfig(name="single_samples", description="all samples taken separately, can be very short and imprecise"),
+        ESLOConfig(name="max=30s", description="samples are merged in order to reach a max duration of 30 seconds."
+                                               "Does not remove single utterances that may exceed "
+                                               "the maximum duration"),
+
+        ESLOConfig(name="max=10s", description="samples are merged in order to reach a max duration of 10 seconds"
+                                               "Does not remove single utterances that may exceed "
+                                               "the maximum duration"),
     ]
 
-    DEFAULT_CONFIG_NAME = "no_overlap_no_hesitation"
+    DEFAULT_CONFIG_NAME = "single_samples"
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -319,19 +319,8 @@ class ESLO(datasets.GeneratorBasedBuilder):
             text += child.tail
         return text
 
-    def clean_text(self, text: str) -> str:
-        def replace_uppercase(match):
-            """replaces BRUNO spelling by B R U N O"""
-            return ' '.join(match.group(1))
-
-        text = re.sub(r"\bNPERS\b", "", text)
-        text = re.sub(r'\bOK\b', 'ok', text)
-        text = re.sub(r'\b([A-Z]+)\b', replace_uppercase, text)
-        if not self.config.hesitation:
-            text = re.sub(r"(euh)|(hm)|(\b\w*\-\s)", "", text)
-        return re.sub(r" +", " ", text).strip()
-
-    def load_one(self, file) -> List[Utterance]:
+    @staticmethod
+    def load_one(file) -> List[Utterance]:
         first_line = file.readline().decode()
         encoding = re.search(r'encoding=["\']([^"]+)["\']', first_line).group(1)
         text_content = file.read().decode(encoding)
@@ -344,7 +333,6 @@ class ESLO(datasets.GeneratorBasedBuilder):
             start_time = float(turn.get('startTime'))
             end_time = float(turn.get('endTime'))
             text = re.sub(r"[\r\n\s]+", " ", ESLO.extract_text(turn).strip())
-            text = self.clean_text(text)
             if any(c.isalnum() for c in text):
                 utts.append(Utterance(
                     speaker=speaker,
@@ -380,6 +368,34 @@ class ESLO(datasets.GeneratorBasedBuilder):
             raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
         return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
 
+    @staticmethod
+    def merge_utterances(utterance1: Utterance, utterance2: Utterance) -> Utterance:
+        return Utterance(
+            speaker="merged",
+            sentence=re.sub(r"\s+", " ", utterance1.sentence + " " + utterance2.sentence),
+            start_timestamp=utterance1.start_timestamp,
+            end_timestamp=utterance2.end_timestamp,
+            overlap=utterance1.overlap or utterance2.overlap
+        )
+
+    def _merged_utterances_iterator(self, utterance_iterator):
+        if self.config.single_samples:
+            yield from utterance_iterator
+        merged_utterance = next(utterance_iterator)
+        start_time = merged_utterance.start_timestamp
+        while True:
+            try:
+                new_utterance = next(utterance_iterator)
+            except StopIteration:
+                yield merged_utterance
+                break
+            end_time = new_utterance.end_timestamp
+            if end_time - start_time > self.config.max_duration:
+                yield merged_utterance
+                merged_utterance = new_utterance
+            else:
+                merged_utterance = ESLO.merge_utterances(merged_utterance, new_utterance)
+
     @staticmethod
     def _cut_audio(audio: Array, start_timestamp: float, end_timestamp: float):
         return audio[int(round(start_timestamp * SAMPLING_RATE)): int(round(end_timestamp * SAMPLING_RATE)) + 1]
@@ -390,9 +406,7 @@ class ESLO(datasets.GeneratorBasedBuilder):
             transcript_name = os.path.splitext(os.path.basename(path))[0]
            audio = self.load_audio(audio_files[transcript_name])
             with open(path, "rb") as file:
-                for utterance in self.load_one(file):
-                    if not self.config.overlap and utterance.overlap:
-                        continue
+                for utterance in self._merged_utterances_iterator(ESLO.load_one(file)):
                     yield f"{transcript_name}_{utterance.start_timestamp}-{utterance.end_timestamp}", {
                         "file": transcript_name,
                         "sentence": utterance.sentence,