from pathlib import Path

import datasets
import numpy as np
import stempeg

_DESCRIPTION = """\ |
|
MUSDB18 music source separation dataset |
|
|
|
to open original stem file (mp4), which is done internally you need stempeg library. |
|
Outcome of mp4 file is a 3 dimensional np_array [n_stems, n_samples, sample_rate]. |
|
|
|
firt dimension meanings: { |
|
0: mixture. |
|
1: drugs, |
|
2: bass, |
|
3: others, |
|
4:vocals, |
|
} |
|
|
|
Original dataset is not cutted in any parts, but here I cut each song in 10 seconds chunks with 1 sec overlap. |
|
""" |
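
# Shape reference for the stem arrays this script handles (an illustrative
# sketch based on the docstring above; the file name is a placeholder):
#
#     S, rate = stempeg.read_stems("Some Track.stem.mp4", dtype=np.float32)
#     S.shape  # (5, n_samples, 2): stems x samples x stereo channels
#     rate     # 44100

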
class Musdb18Dataset(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 300
    SAMPLING_RATE = 44100
    WINDOW_SIZE = SAMPLING_RATE * 10  # 10 seconds of audio per chunk
    INSTRUMENT_NAMES = ["mixture", "drums", "bass", "other", "vocals"]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "name": datasets.Value("string"),
                    "n_window": datasets.Value("int16"),
                    # One stereo Audio feature per stem.
                    **{
                        name: datasets.Audio(
                            sampling_rate=self.SAMPLING_RATE, mono=False
                        )
                        for name in self.INSTRUMENT_NAMES
                    },
                    "mean": datasets.Value("float"),
                    "std": datasets.Value("float"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download_and_extract(
            "https://zenodo.org/record/1117372/files/musdb18.zip?download=1"
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"audio_path": f"{archive_path}/train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"audio_path": f"{archive_path}/test"},
            ),
        ]

    def _generate_stem_dict(self, S, song_name, end):
        # For every stem, take the window of WINDOW_SIZE samples ending at `end`.
        return {
            name: {
                "path": f"{song_name}/{name}",
                "array": S[i, end - self.WINDOW_SIZE : end, :],
                "sampling_rate": self.SAMPLING_RATE,
            }
            for i, name in enumerate(self.INSTRUMENT_NAMES)
        }

    def _generate_examples(self, audio_path):
        id_ = 0
        for stems_path in Path(audio_path).iterdir():
            song_name = stems_path.stem
            # S has shape (n_stems, n_samples, n_channels).
            S, sr = stempeg.read_stems(
                str(stems_path), dtype=np.float32, multiprocess=False
            )

            # Per-song normalization statistics, computed on a mono downmix of
            # the sum of all streams.
            mixture = S.sum(axis=0).T
            assert mixture.shape[0] == 2

            mixture = mixture.mean(0)
            mean = mixture.mean().item()
            std = mixture.std().item()

            # Non-overlapping 10-second windows (assumes every song is at least
            # WINDOW_SIZE samples long, which holds for MUSDB18).
            for idx, end in enumerate(
                range(self.WINDOW_SIZE, S.shape[1], self.WINDOW_SIZE)
            ):
                yield id_, {
                    "name": song_name,
                    "n_window": idx,
                    **self._generate_stem_dict(S, song_name, end),
                    "mean": mean,
                    "std": std,
                }

                id_ += 1

            # Final window: the last 10 seconds of the song, which may overlap
            # the previous window.
            yield id_, {
                "name": song_name,
                "n_window": idx + 1,
                **self._generate_stem_dict(S, song_name, end=S.shape[1]),
                "mean": mean,
                "std": std,
            }

            id_ += 1
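

# Minimal usage sketch (an assumption, not part of the loading script itself):
# point `datasets.load_dataset` at this file to build the dataset locally,
# assuming a datasets version that still supports script-based builders.
# This downloads and decodes the full MUSDB18 archive, so it is slow and
# needs stempeg/ffmpeg available.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, split="train")
    example = ds[0]
    print(example["name"], example["n_window"])
    print(example["mixture"]["sampling_rate"], example["mixture"]["array"].shape)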