import glob

import datasets


class LugandaSpeechDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description="This dataset contains speech recordings in Luganda.",
            features=datasets.Features({
                "sentence": datasets.Value("string"),
                "language": datasets.Value("string"),
                "contributor_id": datasets.Value("int64"),
                "gender": datasets.Value("string"),
                "age_group": datasets.Value("string"),
                "voice_clip": datasets.Value("string"),
                "duration": datasets.Value("float64"),
                "up_votes": datasets.Value("int64"),
                "down_votes": datasets.Value("int64"),
                "Region": datasets.Value("string"),
                # Audio feature: decodes the file referenced by "path" at 16 kHz.
                "path": datasets.Audio(sampling_rate=16000),
            }),
            supervised_keys=None,
            homepage="your_dataset_homepage",
            citation="Your citation",
            # DatasetInfo does not accept a `languages` argument; the language
            # ("lg") is declared in the dataset card metadata instead.
        )
    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": "data/train-*"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": "data/eval-*"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": "data/test-*"},
            ),
        ]
    def _generate_examples(self, filepath):
        # `filepath` is a glob pattern (e.g. "data/train-*"), so expand it and
        # walk every matching shard. Lines are assumed to be plain
        # comma-separated records with no quoted fields.
        for shard in sorted(glob.glob(filepath)):
            with open(shard, "r", encoding="utf-8") as f:
                for line in f:
                    data = line.strip().split(",")
                    # The first column is used as the example key.
                    yield data[0], {
                        "sentence": data[1],
                        "language": data[2],
                        "contributor_id": int(data[3]),
                        "gender": data[4],
                        "age_group": data[5],
                        "voice_clip": data[6],
                        "duration": float(data[7]),
                        "up_votes": int(data[8]),
                        "down_votes": int(data[9]),
                        "Region": data[10],
                        # The Audio feature loads the clip from this file path.
                        "path": data[11],
                    }
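

if __name__ == "__main__":
    # Usage sketch (an assumption, not part of the original script): load the
    # builder straight from this file. `trust_remote_code=True` is needed on
    # recent versions of `datasets` that gate script-based loaders; older
    # versions accept the call without it. This guard never runs when the
    # library imports the script as a loading module.
    from datasets import load_dataset

    ds = load_dataset(__file__, trust_remote_code=True)
    print(ds)
    # Accessing an example decodes the audio via the Audio feature, e.g.
    # ds["train"][0]["path"] -> {"path": ..., "array": ..., "sampling_rate": 16000}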