import os
import random

import datasets

_DOMAIN = "https://pan.ai-hobbyist.com/d/Wuthering Waves Datasets"

_URLS = {
    "zh": "中文 - Chinese",
    "jp": "日语 - Japanese",
    "en": "英语 - English",
    "kr": "韩语 - Korean",
}
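# Subset archives live under per-language directories on the mirror: for
# example, the default "椿" subset of the Chinese split is fetched from
# "{_DOMAIN}/中文 - Chinese/椿.7z" (see _split_generators below).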


class wwTTS(datasets.GeneratorBasedBuilder):
    def _info(self):
        # Fall back to the default character subset when no config is given.
        if self.config.name == "default":
            self.config.name = "椿"

        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "speech": datasets.Audio(sampling_rate=44_100),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=("speech", "text"),
            homepage=f"https://www.modelscope.cn/datasets/Genius-Society/{os.path.basename(__file__)[:-3]}",
            license="CC-BY-NC-ND",
            version="0.0.1",
        )

    def _get_txt(self, file_path: str):
        # Each .wav clip ships with a .lab file holding its transcript.
        lab_path = file_path.replace(".wav", ".lab")
        with open(lab_path, "r", encoding="utf-8") as file:
            content = file.read()

        return content.strip()

    def _split_generators(self, dl_manager):
        datasplits = []
        for region in _URLS:
            url = f"{_DOMAIN}/{_URLS[region]}/{self.config.name}.7z"
            try:
                data_files = dl_manager.download_and_extract(url)
            except Exception as e:
                # Retry once on a transient download failure.
                print(f"{e}, retrying...")
                data_files = dl_manager.download_and_extract(url)

            if os.path.isdir(data_files):
                files = []
                for path in dl_manager.iter_files([data_files]):
                    if os.path.basename(path).endswith(".wav"):
                        files.append(
                            {
                                "speech": path,
                                "text": self._get_txt(path),
                            }
                        )

                random.shuffle(files)
                datasplits.append(
                    datasets.SplitGenerator(
                        name=region,
                        gen_kwargs={"files": files},
                    )
                )

        return datasplits

    def _generate_examples(self, files):
        # Each entry is already a {"speech": path, "text": transcript} dict
        # matching the declared features, so it can be yielded directly.
        for i, item in enumerate(files):
            yield i, item
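

# Usage sketch (illustrative, not part of the builder itself): load one
# character subset through the `datasets` API, assuming this script is saved
# locally as "wwTTS.py". The subset name "椿" is the default config used above.
if __name__ == "__main__":
    dataset = datasets.load_dataset("wwTTS.py", name="椿", trust_remote_code=True)
    # Splits are keyed by the region codes defined in _URLS ("zh", "jp", ...).
    print(dataset["zh"][0]["text"])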