# ml-superb-subset / ml-superb-subset.py
# Author: Tolulope — commit e99bfa0 ("adding space for xty subset")
import csv
import glob
import os
import textwrap
from dataclasses import dataclass
import tqdm
import datasets
from datasets.tasks import AutomaticSpeechRecognition
from typing import List
# Language codes (they appear to be ISO 639-3 — TODO confirm) covered by this
# subset; one BuilderConfig is created per entry below.
LANGUAGES = ["afr", "amh", "azz", "nbl", "nso", "sot", "ssw", "swa", "tos", "tsn", "tso", "ven", "wol", "xho", "xty", "zul"]
class MLSuperbConfig(datasets.BuilderConfig):
    """BuilderConfig for one language of the ML-SUPERB subset."""

    def __init__(self, name, **kwargs):
        # Pin the dataset version; all other keyword arguments pass through.
        super().__init__(name=name, version=datasets.Version("2.19.0"), **kwargs)
class MLSuperb(datasets.GeneratorBasedBuilder):
    """Loader for a 16-language subset of the ML-SUPERB speech corpus.

    One builder config per language (see ``LANGUAGES``); each example pairs a
    path to a wav file with its transcript sentence and utterance id.
    """

    DEFAULT_WRITER_BATCH_SIZE = 1000
    # Archive contains ml_superb_subset/<lang>/ with a wav/ directory plus
    # four transcript files (10-minute and 1-hour train, dev, test).
    URL = "https://224sh3.s3.amazonaws.com/ml_superb_subset.zip"

    BUILDER_CONFIGS = [MLSuperbConfig(name=lang) for lang in LANGUAGES]

    def _info(self):
        # "id" is declared here because _generate_examples yields it; the
        # original feature spec omitted it, which makes `datasets` reject the
        # generated examples with a key-mismatch error.
        features = datasets.Features(
            {
                "audio": datasets.Value("string"),    # path to the wav file
                "sentence": datasets.Value("string"),  # transcript text
                "id": datasets.Value("string"),       # utterance id (first column)
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download and extract the archive, then build one generator per split."""
        archive_root = dl_manager.download_and_extract(self.URL)
        lang_dir = archive_root + "/ml_superb_subset/" + self.config.name
        split_to_filename = {
            "train10min": "transcript_10min_train.txt",
            "train1hr": "transcript_1h_train.txt",
            "dev": "transcript_10min_dev.txt",
            "test": "transcript_10min_test.txt",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "wavs_path": lang_dir + "/wav/",
                    "transcript_path": lang_dir + "/" + filename,
                },
            )
            for split, filename in split_to_filename.items()
        ]

    def _generate_examples(self, wavs_path, transcript_path):
        """Yield ``(id, example)`` pairs from one transcript file.

        Rows are either tab-separated (``<id>\\t...\\t<sentence>``) or
        single-space-separated (``<id> <path> <word> <word> ...``); the
        delimiter is sniffed from the first line.

        NOTE(review): the original implementation consumed the first line
        while sniffing (``next(reader)``) and never rewound the file, so the
        first utterance of every transcript was silently dropped. We now
        seek back to the start before reading examples. If the transcripts
        actually begin with a header row, restore the skip — verify against
        the data.
        """
        with open(transcript_path, encoding="utf-8") as f:
            # No tab on the first line => assume single-space delimiting.
            space_delimited = "\t" not in f.readline()
            f.seek(0)
            delimiter = " " if space_delimited else "\t"
            reader = csv.reader(f, delimiter=delimiter, quoting=csv.QUOTE_NONE)
            for row in reader:
                id_ = row[0]
                if not row[0].endswith(".wav"):
                    row[0] += ".wav"
                # Space-delimited rows scatter the sentence over fields 2..n;
                # tab-delimited rows keep it whole in the last field.
                sentence = " ".join(row[2:]) if space_delimited else row[-1]
                yield id_, {
                    "audio": wavs_path + row[0],
                    "sentence": sentence,
                    "id": id_,
                }