ahnafsamin committed on
Commit 7328d72 · verified · 1 Parent(s): e47c2a2

Delete subakko.py

Files changed (1)
  1. subakko.py +0 -144
subakko.py DELETED
@@ -1,144 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{luong-vu-2016-non,
-     title = "A non-expert {K}aldi recipe for {V}ietnamese Speech Recognition System",
-     author = "Luong, Hieu-Thi and
-       Vu, Hai-Quan",
-     booktitle = "Proceedings of the Third International Workshop on Worldwide Language Service Infrastructure and Second Workshop on Open Infrastructures and Analysis Frameworks for Human Language Technologies ({WLSI}/{OIAF}4{HLT}2016)",
-     month = dec,
-     year = "2016",
-     address = "Osaka, Japan",
-     publisher = "The COLING 2016 Organizing Committee",
-     url = "https://aclanthology.org/W16-5207",
-     pages = "51--55",
- }
- """
-
- _DESCRIPTION = """\
- VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recorded speech prepared for
- the Vietnamese Automatic Speech Recognition task.
- The corpus was prepared by AILAB, a computer science lab of VNUHCM - University of Science, headed by Prof. Vu Hai Quan.
- We publish this corpus in the hope of attracting more scientists to work on Vietnamese speech recognition problems.
- """
-
- _HOMEPAGE = "https://doi.org/10.5281/zenodo.7068130"
-
- _LICENSE = "CC BY-NC-SA 4.0"
-
- # Source data: "https://zenodo.org/record/7068130/files/vivos.tar.gz"
- _DATA_URL = "https://huggingface.co/datasets/ahnafsamin/SUBAK.KO/resolve/main/Data/subakko.zip"
-
- _PROMPTS_URLS = {
-     "train": "https://huggingface.co/datasets/ahnafsamin/SUBAK.KO/resolve/main/Data/train.tar.xz",
-     "test": "https://huggingface.co/datasets/ahnafsamin/SUBAK.KO/resolve/main/Data/test.tar.xz",
- }
-
-
- class Subakko(datasets.GeneratorBasedBuilder):
-     """VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recorded speech prepared for
-     the Vietnamese Automatic Speech Recognition task."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the dataset with configurable options,
-     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "speaker_id": datasets.Value("string"),
-                     "path": datasets.Value("string"),
-                     "audio": datasets.Audio(sampling_rate=16_000),
-                     "sentence": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
-         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive.
-         prompts_paths = dl_manager.download_and_extract(_PROMPTS_URLS)
-         archive = dl_manager.download(_DATA_URL)
-         # Top-level directory inside subakko.zip (assumed archive layout). iter_archive yields
-         # member paths relative to the archive root, so the prefix must not start with "/".
-         train_dir = "subakko"
-         test_dir = "subakko"
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "prompts_path": prompts_paths["train"],
-                     "path_to_clips": train_dir,
-                     "audio_files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "prompts_path": prompts_paths["test"],
-                     "path_to_clips": test_dir,
-                     "audio_files": dl_manager.iter_archive(archive),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, prompts_path, path_to_clips, audio_files):
-         """Yields examples as (key, example) tuples."""
-         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is here for legacy reasons (tfds) and is not important in itself.
-         examples = {}
-         with open(prompts_path, encoding="utf-8") as f:
-             for row in f:
-                 # Each prompt line is "<audio path>\t<transcription>".
-                 data = row.strip().split("\t", 1)
-                 speaker_id = data[0].split("_")[0]
-                 audio_path = data[0]
-                 examples[audio_path] = {
-                     "speaker_id": speaker_id,
-                     "path": audio_path,
-                     "sentence": data[1],
-                 }
-         inside_clips_dir = False
-         id_ = 0
-         for path, f in audio_files:
-             if path.startswith(path_to_clips):
-                 inside_clips_dir = True
-                 if path in examples:
-                     audio = {"path": path, "bytes": f.read()}
-                     yield id_, {**examples[path], "audio": audio}
-                     id_ += 1
-             elif inside_clips_dir:
-                 break