cqlizhijun committed on
Commit cd22391 · 1 Parent(s): 109a03d

Create superb_demo.py

Files changed (1)
  1. superb_demo.py +183 -0
superb_demo.py ADDED
@@ -0,0 +1,183 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
+
+ import csv
+ import glob
+ import os
+ import textwrap
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2105-01051,
+   author        = {Zhi{-}Jun Lee and
+                    Jia{-}Jie Sehn},
+   title         = {{SUPERB:} Speech processing Universal PERformance Benchmark},
+   journal       = {CoRR},
+   volume        = {abs/2105.01051},
+   year          = {2021},
+   url           = {https://arxiv.org/abs/2105.01051},
+   archivePrefix = {arXiv},
+   eprint        = {2105.01051},
+   timestamp     = {Thu, 01 Jul 2021 13:30:22 +0200},
+   biburl        = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
+   bibsource     = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Self-supervised learning (SSL) has proven vital for advancing research in
+ natural language processing (NLP) and computer vision (CV). The paradigm
+ pretrains a shared model on large volumes of unlabeled data and achieves
+ state-of-the-art (SOTA) results on various tasks with minimal adaptation. However,
+ the speech processing community lacks a similar setup to systematically explore
+ the paradigm. To bridge this gap, we introduce the Speech processing Universal
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
+ performance of a shared model across a wide range of speech processing tasks
+ with minimal architecture changes and labeled data. Among multiple usages of the
+ shared model, we especially focus on extracting the representation learned from
+ SSL due to its preferable re-usability. We present a simple framework to solve
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
+ the frozen shared model. Our results demonstrate that the framework is promising,
+ as SSL representations show competitive generalizability and accessibility
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
+ benchmark toolkit to fuel research in representation learning and general
+ speech processing.
+
+ Note that in order to limit the required storage for preparing this dataset, the
+ audio is stored in the .flac format and is not converted to a float32 array. To
+ convert an audio file to a float32 array, please make use of the `.map()`
+ function as follows:
+
+ ```python
+ import soundfile as sf
+
+ def map_to_array(batch):
+     speech_array, _ = sf.read(batch["file"])
+     batch["speech"] = speech_array
+     return batch
+
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
+ ```
+ """
+
+
+ class SuperbConfig(datasets.BuilderConfig):
+     """BuilderConfig for Superb."""
+
+     def __init__(
+         self,
+         features,
+         url,
+         data_url=None,
+         supervised_keys=None,
+         task_templates=None,
+         **kwargs,
+     ):
+         super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
+         self.features = features
+         self.data_url = data_url
+         self.url = url
+         self.supervised_keys = supervised_keys
+         self.task_templates = task_templates
+
+
+ class Superb(datasets.GeneratorBasedBuilder):
+     """Superb dataset."""
+
+     BUILDER_CONFIGS = [
+         SuperbConfig(
+             name="ks",
+             description=textwrap.dedent(
+                 """\
+                 Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined
+                 set of words. The task is usually performed on-device for a fast response time. Thus, accuracy,
+                 model size, and inference time are all crucial. SUPERB uses the widely used Speech Commands
+                 dataset v1.0 for the task. The full dataset consists of ten keyword classes, a class for silence,
+                 and an unknown class to cover false positives; this demo config keeps two keywords ("neunit" and
+                 "wake") plus the unknown class. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "audio": datasets.features.Audio(sampling_rate=16_000),
+                     "label": datasets.ClassLabel(
+                         names=[
+                             "neunit",
+                             "wake",
+                             "_unknown_",
+                         ]
+                     ),
+                 }
+             ),
+             supervised_keys=("file", "label"),
+             url="https://www.tensorflow.org/datasets/catalog/speech_commands",
+             data_url="data/speech_commands_test_set_v0.01.zip",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=self.config.features,
+             supervised_keys=self.config.supervised_keys,
+             homepage=self.config.url,
+             citation=_CITATION,
+             task_templates=self.config.task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.name == "ks":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             # The bundled archive only contains a test set, so only a test split is generated.
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
+                 ),
+             ]
+
+     def _generate_examples(self, archive_path, split=None):
+         """Generate examples."""
+         if self.config.name == "ks":
+             words = ["neunit", "wake"]
+             splits = _split_ks_files(archive_path, split)
+             for key, audio_file in enumerate(sorted(splits[split])):
+                 # The name of the parent directory encodes the keyword; any other
+                 # directory name maps to the catch-all "_unknown_" class.
+                 base_dir, _ = os.path.split(audio_file)
+                 _, word = os.path.split(base_dir)
+                 label = word if word in words else "_unknown_"
+                 yield key, {"file": audio_file, "audio": audio_file, "label": label}
+
+
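+ # A hypothetical archive layout that the labeling logic above and the split
+ # helper below assume: one subdirectory of .wav files per keyword, e.g.
+ #   speech_commands_test_set_v0.01/neunit/clip_001.wav -> "neunit"
+ #   speech_commands_test_set_v0.01/wake/clip_002.wav   -> "wake"
+ #   speech_commands_test_set_v0.01/other/clip_003.wav  -> "_unknown_"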
+ def _split_ks_files(archive_path, split):
+     audio_path = os.path.join(archive_path, "**/*.wav")
+     audio_paths = glob.glob(audio_path)
+     if split == "test":
+         # Use all available files for the test archive.
+         return {"test": audio_paths}
+
+     val_list_file = os.path.join(archive_path, "validation_list.txt")
+     test_list_file = os.path.join(archive_path, "testing_list.txt")
+     with open(val_list_file, encoding="utf-8") as f:
+         val_paths = f.read().strip().splitlines()
+         val_paths = [os.path.join(archive_path, p) for p in val_paths]
+     with open(test_list_file, encoding="utf-8") as f:
+         test_paths = f.read().strip().splitlines()
+         test_paths = [os.path.join(archive_path, p) for p in test_paths]
+
+     # The train split is whatever remains after removing the validation and
+     # test paths from the full set of audio files.
+     train_paths = list(set(audio_paths) - set(val_paths) - set(test_paths))
+
+     return {"train": train_paths, "val": val_paths}
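
For reference, a minimal usage sketch for the new script (not part of the commit): it assumes `superb_demo.py` and its `data/speech_commands_test_set_v0.01.zip` archive sit together locally, and that the installed `datasets` version supports decoding the `Audio` feature; the exact decoded layout of the `audio` column varies across `datasets` versions.

```python
from datasets import load_dataset

# Load the "ks" (keyword spotting) config defined in superb_demo.py.
# The script only generates a test split from the bundled archive.
ks_test = load_dataset("superb_demo.py", "ks", split="test")

# ClassLabel names as declared in the config.
print(ks_test.features["label"].names)  # ['neunit', 'wake', '_unknown_']

sample = ks_test[0]
print(sample["file"])
# "label" is an integer index into the ClassLabel names.
print(ks_test.features["label"].int2str(sample["label"]))
# With Audio decoding enabled, "audio" carries the waveform and sampling rate.
print(sample["audio"]["sampling_rate"])  # 16000
```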