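"""HuggingFace `datasets` loading script for a multilingual subset of ML-SUPERB.

One builder config is generated per language code; every example pairs the path
of a .wav file with its transcript.
"""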
import csv
import os
from typing import List

import datasets

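# Language codes (ISO 639-3) covered by the subset; one config is built per code.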
LANGUAGES = ["afr", "amh", "azz", "nbl", "nso", "sot", "ssw", "swa", "tos", "tsn", "tso", "ven", "wol", "xho", "xty", "zul"]

class MLSuperbConfig(datasets.BuilderConfig):
    """BuilderConfig for ML-SUPERB."""

    def __init__(self, name, **kwargs):
        super().__init__(name=name, version=datasets.Version("2.19.0"), **kwargs)

class MLSuperb(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 1000
URL = "https://224sh3.s3.amazonaws.com/ml_superb_subset.zip"
# BUILDER_CONFIG_CLASS = MLSuperbConfig
    BUILDER_CONFIGS = [MLSuperbConfig(name=lang) for lang in LANGUAGES]

    def _info(self):
        features = datasets.Features(
            {
                # Audio is exposed as a path string; swap in datasets.Audio()
                # here if decoded waveforms are wanted.
                "audio": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                # "id" must be declared here because _generate_examples yields it.
                "id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )
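
    # The zip is assumed to extract to ml_superb_subset/<lang>/, holding a wav/
    # directory and one transcript file per split (see split_to_filename below).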
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        archive_path = dl_manager.download_and_extract(self.URL)
        data_dir = os.path.join(archive_path, "ml_superb_subset", self.config.name)
splits = ("train10min", "train1hr", "dev", "test")
        split_to_filename = {
            "train10min": "transcript_10min_train.txt",
            "train1hr": "transcript_1h_train.txt",
            "dev": "transcript_10min_dev.txt",
            "test": "transcript_10min_test.txt",
        }
        split_generators = []
        for split in splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "wavs_path": os.path.join(data_dir, "wav"),
                        "transcript_path": os.path.join(data_dir, split_to_filename[split]),
                    },
                )
            )
        return split_generators

    def _generate_examples(self, wavs_path, transcript_path):
        with open(transcript_path, encoding="utf-8") as f:
            # Sniff the delimiter from the first line, then rewind so the first
            # record is not silently dropped.
            first_row = next(csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE))
            f.seek(0)
            if len(first_row) == 1:
                # Space-delimited rows: <id> <tag> <transcript ...>; the
                # transcript starts at the third field.
                reader = csv.reader(f, delimiter=" ", quoting=csv.QUOTE_NONE)
                for row in reader:
                    id_ = row[0]
                    if not row[0].endswith(".wav"):
                        row[0] += ".wav"
                    yield id_, {
                        "audio": os.path.join(wavs_path, row[0]),
                        "sentence": " ".join(row[2:]),
                        "id": id_,
                    }
            else:
                # Tab-delimited rows: the transcript is the last field.
                reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                for row in reader:
                    id_ = row[0]
                    if not row[0].endswith(".wav"):
                        row[0] += ".wav"
                    yield id_, {
                        "audio": os.path.join(wavs_path, row[0]),
                        "sentence": row[-1],
                        "id": id_,
                    }
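
# Minimal usage sketch (assumptions: this script is saved as ml_superb.py and
# the S3 URL above is still reachable):
#
#   from datasets import load_dataset
#   ds = load_dataset("ml_superb.py", "xho", trust_remote_code=True)
#   print(ds["dev"][0])  # {"audio": ".../wav/<id>.wav", "sentence": "...", "id": "<id>"}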