import csv
import os
import datasets
from tqdm import tqdm
from .accents import ACCENTS
from .release_stats import STATS

_HOMEPAGE = "https://huggingface.co/datasets/RitchieP/VerbaLex_voice"
_LICENSE = "https://choosealicense.com/licenses/apache-2.0/"
_BASE_URL = "https://huggingface.co/datasets/RitchieP/VerbaLex_voice/resolve/main/"
_AUDIO_URL = _BASE_URL + "audio/{accent}/{split}/{accent}_{split}.tar"
_TRANSCRIPT_URL = _BASE_URL + "transcript/{accent}/{split}.tsv"
_CITATION = """\
"""


class VerbaLexVoiceConfig(datasets.BuilderConfig):
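    """BuilderConfig for a single accent of VerbaLex Voice."""
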
def __init__(self, name, version, **kwargs):
self.accent = kwargs.pop("accent", None)
self.num_speakers = kwargs.pop("num_speakers", None)
self.num_files = kwargs.pop("num_files", None)
description = (
f"VerbaLex Voice english speech-to-text dataset in {self.accent} accent."
)
        super().__init__(
name=name,
version=datasets.Version(version),
description=description,
**kwargs,
)


class VerbaLexVoiceDataset(datasets.GeneratorBasedBuilder):
"""
    VerbaLex is a dataset of accented English speech from non-native English speakers.
    It is created directly from the L2-Arctic dataset.
"""
BUILDER_CONFIGS = [
VerbaLexVoiceConfig(
name=accent,
version=STATS["version"],
accent=ACCENTS[accent],
num_speakers=accent_stats["numOfSpeaker"],
num_files=accent_stats["numOfWavFiles"]
)
for accent, accent_stats in STATS["accents"].items()
]
DEFAULT_CONFIG_NAME = "all"

    def _info(self):
return datasets.DatasetInfo(
            description=(
                "VerbaLex Voice is a speech dataset focusing on accented English. "
                "It specifically targets speech from non-native English speakers."
            ),
features=datasets.Features(
{
"path": datasets.Value("string"),
"sentence": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=44_100)
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION
)

    def _split_generators(self, dl_manager):
"""Returns SplitGenerators"""
accent = self.config.name
splits = ("train", "test")
audio_urls = {}
for split in splits:
audio_urls[split] = _AUDIO_URL.format(accent=accent, split=split)
archive_paths = dl_manager.download(audio_urls)
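        # When streaming, the TAR archives are consumed on the fly via
        # iter_archive below, so there is nothing to extract up front.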
local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
meta_urls = {split: _TRANSCRIPT_URL.format(accent=accent, split=split) for split in splits}
meta_paths = dl_manager.download_and_extract(meta_urls)
split_names = {
"train": datasets.Split.TRAIN,
"test": datasets.Split.TEST
}
split_generators = []
for split in splits:
split_local_extract_archive_paths = local_extracted_archive_paths.get(split)
if not isinstance(split_local_extract_archive_paths, list):
split_local_extract_archive_paths = [split_local_extract_archive_paths]
split_archives = archive_paths.get(split)
if not isinstance(split_archives, list):
split_archives = [split_archives]
split_generators.append(
datasets.SplitGenerator(
name=split_names.get(split, split),
gen_kwargs={
"local_extracted_archive_paths": split_local_extract_archive_paths,
"archives": [dl_manager.iter_archive(path) for path in split_archives],
"meta_path": meta_paths[split]
}
)
)
return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
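        """Yields (key, example) pairs, matching audio files from the TAR
        archives against transcript rows from the TSV metadata."""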
data_fields = list(self._info().features.keys())
metadata = {}
with open(meta_path, encoding="UTF-8") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
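            # Index transcript rows by .wav filename so that archive members
            # can be matched up in the loop below.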
for row in tqdm(reader, desc="Reading metadata..."):
if not row["path"].endswith(".wav"):
row["path"] += ".wav"
for field in data_fields:
if field not in row:
row[field] = ""
metadata[row["path"]] = row
for i, audio_archive in enumerate(archives):
for path, file in audio_archive:
_, filename = os.path.split(path)
if filename in metadata:
result = dict(metadata[filename])
                    # Point "path" at the extracted file when available (non-streaming).
                    if local_extracted_archive_paths:
                        path = os.path.join(local_extracted_archive_paths[i], path)
result["audio"] = {"path": path, "bytes": file.read()}
result["path"] = path
yield path, result
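

# Usage sketch (illustrative, not part of the loading script). Assuming this
# script is hosted on the Hub at RitchieP/VerbaLex_voice and "arabic" is one
# of the accent configs defined in release_stats.STATS:
#
#     from datasets import load_dataset
#
#     # Script-based datasets require trust_remote_code=True on recent
#     # versions of the datasets library.
#     ds = load_dataset("RitchieP/VerbaLex_voice", "arabic",
#                       split="train", trust_remote_code=True)
#     print(ds[0]["sentence"], ds[0]["audio"]["sampling_rate"])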