Commit 25942d9 (verified) · 1 Parent(s): 1ce76d6
Committed by lhoestq (HF Staff)

Delete loading script

Files changed (1)
  1. superb_demo.py +0 -431
superb_demo.py DELETED
@@ -1,431 +0,0 @@
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""SUPERB: Speech processing Universal PERformance Benchmark."""

import csv
import glob
import os
import textwrap

import datasets

_CITATION = """\
@article{DBLP:journals/corr/abs-2105-01051,
  author    = {Shu{-}Wen Yang and
               Po{-}Han Chi and
               Yung{-}Sung Chuang and
               Cheng{-}I Jeff Lai and
               Kushal Lakhotia and
               Yist Y. Lin and
               Andy T. Liu and
               Jiatong Shi and
               Xuankai Chang and
               Guan{-}Ting Lin and
               Tzu{-}Hsien Huang and
               Wei{-}Cheng Tseng and
               Ko{-}tik Lee and
               Da{-}Rong Liu and
               Zili Huang and
               Shuyan Dong and
               Shang{-}Wen Li and
               Shinji Watanabe and
               Abdelrahman Mohamed and
               Hung{-}yi Lee},
  title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
  journal   = {CoRR},
  volume    = {abs/2105.01051},
  year      = {2021},
  url       = {https://arxiv.org/abs/2105.01051},
  archivePrefix = {arXiv},
  eprint    = {2105.01051},
  timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

_DESCRIPTION = """\
Self-supervised learning (SSL) has proven vital for advancing research in
natural language processing (NLP) and computer vision (CV). The paradigm
pretrains a shared model on large volumes of unlabeled data and achieves
state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
speech processing community lacks a similar setup to systematically explore the
paradigm. To bridge this gap, we introduce Speech processing Universal
PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
performance of a shared model across a wide range of speech processing tasks
with minimal architecture changes and labeled data. Among multiple usages of the
shared model, we especially focus on extracting the representation learned from
SSL due to its preferable re-usability. We present a simple framework to solve
SUPERB tasks by learning task-specialized lightweight prediction heads on top of
the frozen shared model. Our results demonstrate that the framework is promising
as SSL representations show competitive generalizability and accessibility
across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
benchmark toolkit to fuel the research in representation learning and general
speech processing.

Note that in order to limit the required storage for preparing this dataset, the
audio is stored in the .flac format and is not converted to a float32 array. To
convert the audio file to a float32 array, please make use of the `.map()`
function as follows:


```python
import soundfile as sf

def map_to_array(batch):
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch

dataset = dataset.map(map_to_array, remove_columns=["file"])
```
"""


class SuperbConfig(datasets.BuilderConfig):
    """BuilderConfig for Superb."""

    def __init__(
        self,
        features,
        url,
        data_url=None,
        supervised_keys=None,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
        self.features = features
        self.data_url = data_url
        self.url = url
        self.supervised_keys = supervised_keys


class Superb(datasets.GeneratorBasedBuilder):
    """Superb dataset."""

    BUILDER_CONFIGS = [
        SuperbConfig(
            name="asr",
            description=textwrap.dedent(
                """\
                ASR transcribes utterances into words. While PR analyzes the
                improvement in modeling phonetics, ASR reflects the significance of
                the improvement in a real-world scenario. LibriSpeech
                train-clean-100/dev-clean/test-clean subsets are used for
                training/validation/testing. The evaluation metric is word error
                rate (WER)."""
            ),
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            url="http://www.openslr.org/12",
            data_url="data/LibriSpeech-test-clean.zip",
        ),
        SuperbConfig(
            name="ks",
            description=textwrap.dedent(
                """\
                Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of
                words. The task is usually performed on-device for the fast response time. Thus, accuracy, model size, and
                inference time are all crucial. SUPERB uses the widely used Speech Commands dataset v1.0 for the task.
                The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include
                false positives. The evaluation metric is accuracy (ACC)."""
            ),
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "label": datasets.ClassLabel(
                        names=[
                            "yes",
                            "no",
                            "up",
                            "down",
                            "left",
                            "right",
                            "on",
                            "off",
                            "stop",
                            "go",
                            "_silence_",
                            "_unknown_",
                        ]
                    ),
                }
            ),
            supervised_keys=("file", "label"),
            url="https://www.tensorflow.org/datasets/catalog/speech_commands",
            data_url="data/speech_commands_test_set_v0.01.zip",
        ),
        SuperbConfig(
            name="ic",
            description=textwrap.dedent(
                """\
                Intent Classification (IC) classifies utterances into predefined classes to determine the intent of
                speakers. SUPERB uses the Fluent Speech Commands dataset, where each utterance is tagged with three intent
                labels: action, object, and location. The evaluation metric is accuracy (ACC)."""
            ),
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "speaker_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "action": datasets.ClassLabel(
                        names=["activate", "bring", "change language", "deactivate", "decrease", "increase"]
                    ),
                    "object": datasets.ClassLabel(
                        names=[
                            "Chinese",
                            "English",
                            "German",
                            "Korean",
                            "heat",
                            "juice",
                            "lamp",
                            "lights",
                            "music",
                            "newspaper",
                            "none",
                            "shoes",
                            "socks",
                            "volume",
                        ]
                    ),
                    "location": datasets.ClassLabel(names=["bedroom", "kitchen", "none", "washroom"]),
                }
            ),
            # no default supervised keys, since there are 3 labels
            supervised_keys=None,
            url="https://fluent.ai/fluent-speech-commands-a-dataset-for-spoken-language-understanding-research/",
            data_url="data/fluent_speech_commands_dataset.zip",
        ),
        SuperbConfig(
            name="si",
            description=textwrap.dedent(
                """\
                Speaker Identification (SI) classifies each utterance for its speaker identity as a multi-class
                classification, where speakers are in the same predefined set for both training and testing. The widely
                used VoxCeleb1 dataset is adopted, and the evaluation metric is accuracy (ACC)."""
            ),
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "label": datasets.ClassLabel(names=[f"id{i + 10001}" for i in range(1251)]),
                }
            ),
            supervised_keys=("file", "label"),
            url="https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1.html",
            data_url="data/VoxCeleb1.zip",
        ),
        SuperbConfig(
            name="er",
            description=textwrap.dedent(
                """\
                Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset
                IEMOCAP is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion
                classes to leave the final four classes with a similar amount of data points and cross-validate on five
                folds of the standard splits. The evaluation metric is accuracy (ACC)."""
            ),
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "label": datasets.ClassLabel(names=["neu", "hap", "ang", "sad"]),
                }
            ),
            supervised_keys=("file", "label"),
            url="https://sail.usc.edu/iemocap/",
            data_url="data/IEMOCAP_full_release.zip",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=self.config.supervised_keys,
            homepage=self.config.url,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "asr":
            archive_path = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path}),
            ]
        elif self.config.name == "ks":
            archive_path = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
                ),
            ]
        elif self.config.name == "ic":
            archive_path = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
                ),
            ]
        elif self.config.name == "si":
            archive_path = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": 3}
                ),
            ]
        elif self.config.name == "sd":
            archive_path = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
                )
            ]
        elif self.config.name == "er":
            archive_path = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name="session1", gen_kwargs={"archive_path": archive_path, "split": 1},
                )
            ]

    def _generate_examples(self, archive_path, split=None):
        """Generate examples."""
        if self.config.name == "asr":
            transcripts_glob = os.path.join(archive_path, "LibriSpeech", "*/*/*/*.txt")
            key = 0
            for transcript_path in sorted(glob.glob(transcripts_glob)):
                transcript_dir_path = os.path.dirname(transcript_path)
                with open(transcript_path, "r", encoding="utf-8") as f:
                    for line in f:
                        line = line.strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.flac"
                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
                        audio_path = os.path.join(transcript_dir_path, audio_file)
                        yield key, {
                            "id": id_,
                            "speaker_id": speaker_id,
                            "chapter_id": chapter_id,
                            "file": audio_path,
                            "audio": audio_path,
                            "text": transcript,
                        }
                        key += 1
        elif self.config.name == "ks":
            words = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go"]
            splits = _split_ks_files(archive_path, split)
            for key, audio_file in enumerate(sorted(splits[split])):
                base_dir, file_name = os.path.split(audio_file)
                _, word = os.path.split(base_dir)
                if word in words:
                    label = word
                elif word == "_silence_" or word == "_background_noise_":
                    label = "_silence_"
                else:
                    label = "_unknown_"
                yield key, {"file": audio_file, "audio": audio_file, "label": label}
        elif self.config.name == "ic":
            root_path = os.path.join(archive_path, "fluent_speech_commands_dataset/")
            csv_path = os.path.join(root_path, f"data/{split}_data.csv")
            with open(csv_path, encoding="utf-8") as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
                next(csv_reader)
                for row in csv_reader:
                    key, file_path, speaker_id, text, action, object_, location = row
                    audio_path = os.path.join(root_path, file_path)
                    yield key, {
                        "file": audio_path,
                        "audio": audio_path,
                        "speaker_id": speaker_id,
                        "text": text,
                        "action": action,
                        "object": object_,
                        "location": location,
                    }
        elif self.config.name == "si":
            wav_path = os.path.join(archive_path, "wav/")
            splits_path = os.path.join(archive_path, "veri_test_class.txt")
            with open(splits_path, "r", encoding="utf-8") as f:
                for key, line in enumerate(f):
                    split_id, file_path = line.strip().split(" ")
                    if int(split_id) != split:
                        continue
                    speaker_id = file_path.split("/")[0]
                    audio_path = os.path.join(wav_path, file_path)
                    yield key, {
                        "file": audio_path,
                        "audio": audio_path,
                        "label": speaker_id,
                    }
        elif self.config.name == "er":
            root_path = os.path.join(archive_path, f"Session{split}/")
            wav_path = os.path.join(root_path, "sentences/wav/")
            labels_path = os.path.join(root_path, "dialog/EmoEvaluation/*.txt")
            emotions = ["neu", "hap", "ang", "sad", "exc"]
            key = 0
            for labels_file in sorted(glob.glob(labels_path)):
                with open(labels_file, "r", encoding="utf-8") as f:
                    for line in f:
                        if line[0] != "[":
                            continue
                        _, filename, emo, _ = line.split("\t")
                        if emo not in emotions:
                            continue
                        wav_subdir = filename.rsplit("_", 1)[0]
                        filename = f"{filename}.wav"
                        audio_path = os.path.join(wav_path, wav_subdir, filename)
                        yield key, {
                            "file": audio_path,
                            "audio": audio_path,
                            "label": emo.replace("exc", "hap"),
                        }
                        key += 1


def _split_ks_files(archive_path, split):
    audio_path = os.path.join(archive_path, "**/*.wav")
    audio_paths = glob.glob(audio_path)
    if split == "test":
        # use all available files for the test archive
        return {"test": audio_paths}

    val_list_file = os.path.join(archive_path, "validation_list.txt")
    test_list_file = os.path.join(archive_path, "testing_list.txt")
    with open(val_list_file, encoding="utf-8") as f:
        val_paths = f.read().strip().splitlines()
        val_paths = [os.path.join(archive_path, p) for p in val_paths]
    with open(test_list_file, encoding="utf-8") as f:
        test_paths = f.read().strip().splitlines()
        test_paths = [os.path.join(archive_path, p) for p in test_paths]

    # the paths for the train set is just whichever paths that do not exist in
    # either the test or validation splits
    train_paths = list(set(audio_paths) - set(val_paths) - set(test_paths))

    return {"train": train_paths, "val": val_paths}