import datasets


_CITATION = """\ |
|
@inproceedings{luong-vu-2016-non, |
|
title = "A non-expert {K}aldi recipe for {V}ietnamese Speech Recognition System", |
|
author = "Luong, Hieu-Thi and |
|
Vu, Hai-Quan", |
|
booktitle = "Proceedings of the Third International Workshop on Worldwide Language Service Infrastructure and Second Workshop on Open Infrastructures and Analysis Frameworks for Human Language Technologies ({WLSI}/{OIAF}4{HLT}2016)", |
|
month = dec, |
|
year = "2016", |
|
address = "Osaka, Japan", |
|
publisher = "The COLING 2016 Organizing Committee", |
|
url = "https://aclanthology.org/W16-5207", |
|
pages = "51--55", |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for |
|
Vietnamese Automatic Speech Recognition task. |
|
The corpus was prepared by AILAB, a computer science lab of VNUHCM - University of Science, with Prof. Vu Hai Quan is the head of. |
|
We publish this corpus in hope to attract more scientists to solve Vietnamese speech recognition problems. |
|
""" |
|
|
|
_HOMEPAGE = "https://doi.org/10.5281/zenodo.7068130"

_LICENSE = "CC BY-NC-SA 4.0"

_DATA_URL = "https://huggingface.co/datasets/ahnafsamin/SUBAK.KO/resolve/main/Data/subakko.zip"

_PROMPTS_URLS = {
    "train": "https://huggingface.co/datasets/ahnafsamin/SUBAK.KO/resolve/main/Data/train.tar.xz",
    "test": "https://huggingface.co/datasets/ahnafsamin/SUBAK.KO/resolve/main/Data/test.tar.xz",
}
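# Note: the prompts files above are assumed to be tab-separated, one line per clip,
# in the form "<clip path inside subakko.zip>\t<transcript>"; that is the layout
# _generate_examples parses below, so adjust the parsing if the format differs.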
class Subakko(datasets.GeneratorBasedBuilder):
    """VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recorded speech, prepared for
    the Vietnamese Automatic Speech Recognition task."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download and extract the small prompt archives; the audio archive is
        # only downloaded (not extracted) so it can be streamed with iter_archive.
        prompts_paths = dl_manager.download_and_extract(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        # Assumed archive layout: all clips sit under a top-level "subakko/"
        # directory. iter_archive yields member paths relative to the archive
        # root (no leading slash), so the prefix must not start with "/".
        train_dir = "subakko"
        test_dir = "subakko"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_paths["train"],
                    "path_to_clips": train_dir,
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "prompts_path": prompts_paths["test"],
                    "path_to_clips": test_dir,
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yields examples as (key, example) tuples."""
        # Build a lookup from clip path to its transcript metadata.
        examples = {}
        with open(prompts_path, encoding="utf-8") as f:
            for row in f:
                data = row.strip().split("\t", 1)
                if len(data) != 2:
                    # Skip malformed lines that lack a tab-separated transcript.
                    continue
                audio_path = data[0]
                # Assumption: the clip's parent directory names the speaker,
                # e.g. ".../<speaker_id>/<utterance>.wav"; adjust if the
                # prompt files encode speakers differently.
                speaker_id = audio_path.split("/")[-2] if "/" in audio_path else ""
                examples[audio_path] = {
                    "speaker_id": speaker_id,
                    "path": audio_path,
                    "sentence": data[1],
                }
        inside_clips_dir = False
        id_ = 0
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
            elif inside_clips_dir:
                # Archive members are grouped by directory, so once we have
                # left the clips directory there is nothing more to yield.
                break
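# ---------------------------------------------------------------------------
# Usage sketch, not part of the loader. Assumptions: this file is saved as
# "subakko.py" and the installed `datasets` version still supports loading
# local dataset scripts. Running the file directly streams a few training
# examples as a quick smoke test of the download and parsing logic above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, split="train", streaming=True)
    for i, example in enumerate(ds):
        print(example["path"], example["speaker_id"], example["sentence"][:50])
        if i >= 2:
            break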