# File: dataspeech-main/dataspeech/cpu_enrichments/rate.py
from g2p import make_g2p
# Grapheme-to-phoneme transducer: converts English text into an IPA phoneme string.
transducer = make_g2p('eng', 'eng-ipa')

def rate_apply(batch, rank=None, audio_column_name='audio', text_column_name='text'):
    if isinstance(batch[text_column_name], list):
        speaking_rates = []
        phonemes_list = []
        if 'speech_duration' in batch:
            for (text, audio_duration) in zip(batch[text_column_name], batch['speech_duration']):
                phonemes = transducer(text).output_string
                audio_duration = audio_duration if audio_duration != 0 else 0.01  # guard against zero-length durations
                speaking_rate = len(phonemes) / audio_duration  # phonemes per second
                speaking_rates.append(speaking_rate)
                phonemes_list.append(phonemes)
        else:
            for (text, audio) in zip(batch[text_column_name], batch[audio_column_name]):
                phonemes = transducer(text).output_string
                sample_rate = audio['sampling_rate']
                audio_length = len(audio['array'].squeeze()) / sample_rate
                speaking_rate = len(phonemes) / audio_length
                speaking_rates.append(speaking_rate)
                phonemes_list.append(phonemes)
        batch['speaking_rate'] = speaking_rates
        batch['phonemes'] = phonemes_list
    else:
        phonemes = transducer(batch[text_column_name]).output_string
        if 'speech_duration' in batch:
            audio_length = batch['speech_duration'] if batch['speech_duration'] != 0 else 0.01
        else:
            sample_rate = batch[audio_column_name]['sampling_rate']
            audio_length = len(batch[audio_column_name]['array'].squeeze()) / sample_rate
        speaking_rate = len(phonemes) / audio_length
        batch['speaking_rate'] = speaking_rate
        batch['phonemes'] = phonemes
    return batch
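
# Illustrative sketch (not part of the original file): one way to call rate_apply directly on a
# single example. The text and duration below are made-up values; in the pipeline this function
# is applied through datasets.Dataset.map (see main.py).
if __name__ == '__main__':
    example = {'text': 'hello world', 'speech_duration': 1.2}
    enriched = rate_apply(example, text_column_name='text')
    print(enriched['speaking_rate'], enriched['phonemes'])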

# File: dataspeech-main/dataspeech/gpu_enrichments/pitch.py
import torch
import penn
# Hyperparameters passed to penn's pitch estimation.
hopsize = 0.01  # analysis hop size, in seconds
fmin = 30.0  # minimum pitch, in Hz
fmax = 1000.0  # maximum pitch, in Hz
checkpoint = None  # None selects penn's default checkpoint
center = 'half-hop'
interp_unvoiced_at = 0.065  # interpolate pitch through frames whose periodicity falls below this threshold

def pitch_apply(batch, rank=None, audio_column_name='audio', output_column_name='utterance_pitch', penn_batch_size=4096):
    if isinstance(batch[audio_column_name], list):
        utterance_pitch_mean = []
        utterance_pitch_std = []
        for sample in batch[audio_column_name]:
            # Map the worker rank to a GPU index; without GPUs this stays as `rank` (None in practice, i.e. CPU).
            gpu = (rank or 0) % torch.cuda.device_count() if torch.cuda.device_count() > 0 else rank
            (pitch, periodicity) = penn.from_audio(
                torch.tensor(sample['array'][None, :]).float(),
                sample['sampling_rate'],
                hopsize=hopsize,
                fmin=fmin,
                fmax=fmax,
                checkpoint=checkpoint,
                batch_size=penn_batch_size,
                center=center,
                interp_unvoiced_at=interp_unvoiced_at,
                gpu=gpu,
            )
            utterance_pitch_mean.append(pitch.mean().cpu())
            utterance_pitch_std.append(pitch.std().cpu())
        batch[f'{output_column_name}_mean'] = utterance_pitch_mean
        batch[f'{output_column_name}_std'] = utterance_pitch_std
    else:
        sample = batch[audio_column_name]
        # Map the worker rank to a GPU index; without GPUs this stays as `rank` (None in practice, i.e. CPU).
        gpu = (rank or 0) % torch.cuda.device_count() if torch.cuda.device_count() > 0 else rank
        (pitch, periodicity) = penn.from_audio(
            torch.tensor(sample['array'][None, :]).float(),
            sample['sampling_rate'],
            hopsize=hopsize,
            fmin=fmin,
            fmax=fmax,
            checkpoint=checkpoint,
            batch_size=penn_batch_size,
            center=center,
            interp_unvoiced_at=interp_unvoiced_at,
            gpu=gpu,
        )
        batch[f'{output_column_name}_mean'] = pitch.mean().cpu()
        batch[f'{output_column_name}_std'] = pitch.std().cpu()
    return batch
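
# Illustrative sketch (not part of the original file): estimating pitch statistics for one second
# of a synthetic 220 Hz sine wave. The input values are made up; with no GPU visible, rank stays
# None and penn runs on CPU with its default checkpoint.
if __name__ == '__main__':
    import numpy as np
    sr = 16000
    t = np.arange(sr) / sr
    sample = {'array': np.sin(2 * np.pi * 220.0 * t), 'sampling_rate': sr}
    out = pitch_apply({'audio': sample})
    print(out['utterance_pitch_mean'], out['utterance_pitch_std'])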

# File: dataspeech-main/dataspeech/gpu_enrichments/snr_and_reverb.py
from pyannote.audio import Model
from pathlib import Path
from brouhaha.pipeline import RegressiveActivityDetectionPipeline
import torch
from huggingface_hub import hf_hub_download
import numpy as np
model = None  # lazily-initialised brouhaha model, shared across calls within a worker process
ratio = 16000 / 270  # frames per second of the model outputs; converts segment times (s) to frame indices

def snr_apply(batch, rank=None, audio_column_name='audio', batch_size=32):
    global model
    if model is None:
        # Lazily download and load the brouhaha checkpoint once per worker process.
        model = Model.from_pretrained(Path(hf_hub_download(repo_id='ylacombe/brouhaha-best', filename='best.ckpt')), strict=False)
    if rank is not None or torch.cuda.device_count() > 0:
        device = f'cuda:{(rank or 0) % torch.cuda.device_count()}'
        model.to(device)
    pipeline = RegressiveActivityDetectionPipeline(segmentation=model, batch_size=batch_size)
    if rank:
        pipeline.to(torch.device(device))
    device = pipeline._models['segmentation'].device
    if isinstance(batch[audio_column_name], list):
        snr = []
        c50 = []
        vad_durations = []
        for sample in batch[audio_column_name]:
            res = pipeline({'sample_rate': sample['sampling_rate'], 'waveform': torch.tensor(sample['array'][None, :]).to(device).float()})
            # Frame-level mask over the detected speech segments.
            mask = np.full(res['snr'].shape, False)
            for (segment, _) in res['annotation'].itertracks():
                start = int(segment.start * ratio)
                end = int(segment.end * ratio)
                mask[start:end] = True
            # Ignore frames where both SNR and C50 are exactly zero (treated as invalid).
            mask = ~((res['snr'] == 0.0) & (res['c50'] == 0.0)) & mask
            # Total duration of detected speech, in seconds.
            vad_duration = sum(map(lambda x: x[0].duration, res['annotation'].itertracks()))
            snr.append(res['snr'][mask].mean())
            c50.append(res['c50'][mask].mean())
            vad_durations.append(np.float32(vad_duration))
        batch['snr'] = snr
        batch['c50'] = c50
        batch['speech_duration'] = vad_durations
    else:
        res = pipeline({'sample_rate': batch[audio_column_name]['sampling_rate'], 'waveform': torch.tensor(batch[audio_column_name]['array'][None, :]).to(device).float()})
        mask = np.full(res['snr'].shape, False)
        for (segment, _) in res['annotation'].itertracks():
            start = int(segment.start * ratio)
            end = int(segment.end * ratio)
            mask[start:end] = True
        mask = ~((res['snr'] == 0.0) & (res['c50'] == 0.0)) & mask
        vad_duration = sum(map(lambda x: x[0].duration, res['annotation'].itertracks()))
        batch['snr'] = res['snr'][mask].mean()
        batch['c50'] = res['c50'][mask].mean()
        batch['speech_duration'] = vad_duration
    return batch
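
# Illustrative sketch (not part of the original file): running the brouhaha pipeline on five
# seconds of synthetic noise. The input is made up and the checkpoint is fetched from the Hub on
# first use; with no detected speech the masked means can come out as NaN.
if __name__ == '__main__':
    import numpy as np
    sr = 16000
    sample = {'array': np.random.randn(5 * sr).astype(np.float32), 'sampling_rate': sr}
    out = snr_apply({'audio': sample}, batch_size=1)
    print(out['snr'], out['c50'], out['speech_duration'])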

# File: dataspeech-main/dataspeech/gpu_enrichments/squim.py
from torchaudio.pipelines import SQUIM_OBJECTIVE
import torch
import torchaudio
model = None  # lazily-initialised SQUIM objective model, shared across calls within a worker process
max_audio_length = 15 * SQUIM_OBJECTIVE.sample_rate  # cap inputs at 15 seconds (in samples)

def squim_apply(batch, rank=None, audio_column_name='audio'):
    global model
    if model is None:
        model = SQUIM_OBJECTIVE.get_model()
    if rank is not None or torch.cuda.device_count() > 0:
        device = f'cuda:{(rank or 0) % torch.cuda.device_count()}'
        model.to(device)
    else:
        device = 'cpu'
    if isinstance(batch[audio_column_name], list):
        sdr = []
        pesq = []
        stoi = []
        for sample in batch[audio_column_name]:
            waveform = torchaudio.functional.resample(torch.tensor(sample['array'])[None, :].to(device).float(), sample['sampling_rate'], SQUIM_OBJECTIVE.sample_rate)
            with torch.no_grad():
                waveform = waveform[:, :min(max_audio_length, waveform.shape[1])]
                (stoi_sample, pesq_sample, sdr_sample) = model(waveform)
            sdr.append(sdr_sample.cpu()[0])
            pesq.append(pesq_sample.cpu()[0])
            stoi.append(stoi_sample.cpu()[0])
        batch['sdr'] = sdr
        batch['pesq'] = pesq
        batch['stoi'] = stoi
    else:
        waveform = torchaudio.functional.resample(torch.tensor(batch[audio_column_name]['array'][None, :]).to(device).float(), batch[audio_column_name]['sampling_rate'], SQUIM_OBJECTIVE.sample_rate)
        with torch.no_grad():
            (stoi_sample, pesq_sample, sdr_sample) = model(waveform)
        batch['sdr'] = sdr_sample.cpu()[0]
        batch['pesq'] = pesq_sample.cpu()[0]
        batch['stoi'] = stoi_sample.cpu()[0]
    return batch
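
# Illustrative sketch (not part of the original file): scoring a few seconds of synthetic noise
# with the SQUIM objective model. The input is made up; the returned values are reference-free
# estimates of STOI, PESQ and SI-SDR.
if __name__ == '__main__':
    import numpy as np
    sr = 16000
    sample = {'array': np.random.randn(3 * sr).astype(np.float32), 'sampling_rate': sr}
    out = squim_apply({'audio': sample})
    print(out['stoi'], out['pesq'], out['sdr'])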

# File: dataspeech-main/main.py
from datasets import load_dataset, Audio
from multiprocess import set_start_method
from dataspeech import rate_apply, pitch_apply, snr_apply, squim_apply
import torch
import argparse
if __name__ == '__main__':
    set_start_method('spawn')  # 'spawn' lets CUDA be initialised safely inside dataset worker processes
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_name', type=str, help='Path or name of the dataset. See: https://huggingface.co/docs/datasets/v2.17.0/en/package_reference/loading_methods#datasets.load_dataset.path')
    parser.add_argument('--configuration', default=None, type=str, help='Dataset configuration to use, if necessary.')
    parser.add_argument('--output_dir', default=None, type=str, help='If specified, save the dataset on disk with this path.')
    parser.add_argument('--repo_id', default=None, type=str, help='If specified, push the dataset to the hub.')
    parser.add_argument('--audio_column_name', default='audio', type=str, help='Column name of the audio column to be enriched.')
    parser.add_argument('--text_column_name', default='text', type=str, help='Text column name.')
    parser.add_argument('--rename_column', action='store_true', help="If activated, rename audio and text column names to 'audio' and 'text'. Useful if you want to merge datasets afterwards.")
    parser.add_argument('--cpu_num_workers', default=1, type=int, help="Number of CPU workers for transformations that don't use GPUs or if no GPUs are available.")
    parser.add_argument('--cpu_writer_batch_size', default=1000, type=int, help="writer_batch_size for transformations that don't use GPUs. See: https://huggingface.co/docs/datasets/v2.17.0/en/package_reference/main_classes#datasets.Dataset.map.writer_batch_size")
    parser.add_argument('--batch_size', default=2, type=int, help='Number of samples passed per worker batch for operations that use GPUs.')
    parser.add_argument('--penn_batch_size', default=4096, type=int, help="Pitch estimation chunks audio into smaller pieces and processes them in batches; this specifies that batch size. If you are using a GPU, pick a batch size that doesn't cause memory errors.")
    parser.add_argument('--num_workers_per_gpu_for_pitch', default=1, type=int, help='Number of workers per GPU for the pitch estimation if GPUs are available. Defaults to 1 if any are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
    parser.add_argument('--num_workers_per_gpu_for_snr', default=1, type=int, help='Number of workers per GPU for the SNR and reverberation estimation if GPUs are available. Defaults to 1 if any are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
    parser.add_argument('--apply_squim_quality_estimation', action='store_true', help='If set, also compute torchaudio-squim quality estimates (SI-SDR, STOI and PESQ).')
    parser.add_argument('--num_workers_per_gpu_for_squim', default=1, type=int, help='Number of workers per GPU for the SI-SDR, STOI and PESQ estimation if GPUs are available. Defaults to 1 if any are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
    args = parser.parse_args()
    if args.configuration:
        dataset = load_dataset(args.dataset_name, args.configuration, num_proc=args.cpu_num_workers)
    else:
        dataset = load_dataset(args.dataset_name, num_proc=args.cpu_num_workers)
    audio_column_name = 'audio' if args.rename_column else args.audio_column_name
    text_column_name = 'text' if args.rename_column else args.text_column_name
    if args.rename_column:
        dataset = dataset.rename_columns({args.audio_column_name: 'audio', args.text_column_name: 'text'})
    if args.apply_squim_quality_estimation:
        print('Compute SI-SDR, PESQ, STOI')
        squim_dataset = dataset.map(
            squim_apply,
            batched=True,
            batch_size=args.batch_size,
            with_rank=True if torch.cuda.device_count() > 0 else False,
            num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_squim if torch.cuda.device_count() > 0 else args.cpu_num_workers,
            remove_columns=[audio_column_name],
            fn_kwargs={'audio_column_name': audio_column_name},
        )
    print('Compute pitch')
    pitch_dataset = dataset.cast_column(audio_column_name, Audio(sampling_rate=16000)).map(
        pitch_apply,
        batched=True,
        batch_size=args.batch_size,
        with_rank=True if torch.cuda.device_count() > 0 else False,
        num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_pitch if torch.cuda.device_count() > 0 else args.cpu_num_workers,
        remove_columns=[audio_column_name],
        fn_kwargs={'audio_column_name': audio_column_name, 'penn_batch_size': args.penn_batch_size},
    )
    print('Compute snr and reverb')
    snr_dataset = dataset.map(
        snr_apply,
        batched=True,
        batch_size=args.batch_size,
        with_rank=True if torch.cuda.device_count() > 0 else False,
        num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_snr if torch.cuda.device_count() > 0 else args.cpu_num_workers,
        remove_columns=[audio_column_name],
        fn_kwargs={'audio_column_name': audio_column_name},
    )
    print('Compute speaking rate')
    # If the SNR step produced a `speech_duration` column, compute the speaking rate from it;
    # otherwise fall back to the length of the raw audio.
    if 'speech_duration' in snr_dataset[next(iter(snr_dataset.keys()))].features:
        rate_dataset = snr_dataset.map(
            rate_apply,
            with_rank=False,
            num_proc=args.cpu_num_workers,
            writer_batch_size=args.cpu_writer_batch_size,
            fn_kwargs={'audio_column_name': audio_column_name, 'text_column_name': text_column_name},
        )
    else:
        rate_dataset = dataset.map(
            rate_apply,
            with_rank=False,
            num_proc=args.cpu_num_workers,
            writer_batch_size=args.cpu_writer_batch_size,
            remove_columns=[audio_column_name],
            fn_kwargs={'audio_column_name': audio_column_name, 'text_column_name': text_column_name},
        )
    # Merge the new columns back into the original dataset, split by split.
    for split in dataset.keys():
        dataset[split] = pitch_dataset[split].add_column('snr', snr_dataset[split]['snr']).add_column('c50', snr_dataset[split]['c50'])
        if 'speech_duration' in snr_dataset[split].features:
            dataset[split] = dataset[split].add_column('speech_duration', snr_dataset[split]['speech_duration'])
        dataset[split] = dataset[split].add_column('speaking_rate', rate_dataset[split]['speaking_rate']).add_column('phonemes', rate_dataset[split]['phonemes'])
        if args.apply_squim_quality_estimation:
            dataset[split] = dataset[split].add_column('stoi', squim_dataset[split]['stoi']).add_column('si-sdr', squim_dataset[split]['sdr']).add_column('pesq', squim_dataset[split]['pesq'])
    if args.output_dir:
        print('Saving to disk...')
        dataset.save_to_disk(args.output_dir)
    if args.repo_id:
        print('Pushing to the hub...')
        if args.configuration:
            dataset.push_to_hub(args.repo_id, args.configuration)
        else:
            dataset.push_to_hub(args.repo_id)
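
# Illustrative invocation sketch (placeholder names, not from the original file): enrich a dataset
# and push the result to the Hub. All flags shown are defined by the argparse block above.
#   python main.py <dataset_name_or_path> \
#       --configuration <config_name> \
#       --cpu_num_workers 8 \
#       --batch_size 8 \
#       --apply_squim_quality_estimation \
#       --repo_id <username/enriched_dataset>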