darshanmakwana committed on
Commit 61f45fc · verified · 1 Parent(s): 743f27b

Update README.md

Files changed (1):
  1. README.md +53 -0
README.md CHANGED
@@ -26,3 +26,56 @@ configs:
  - split: test
    path: data/test-*
---

This dataset contains audio from [lewtun/music_genres](https://huggingface.co/datasets/lewtun/music_genres) tokenized with [SemantiCodec](https://arxiv.org/abs/2405.00233), for experiments on autoregressive (AR) music generation.

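The tokens can be loaded directly with the `datasets` library. A minimal sketch, assuming a placeholder repository id; substitute the actual `<user_name>/<repo_name>` where this dataset is hosted:

```python
from datasets import load_dataset

# Placeholder repository id: replace with the actual "<user_name>/<repo_name>" of this dataset.
ds = load_dataset("<user_name>/<repo_name>", split="train")

sample = ds[0]
print(sample["genre"], sample["genre_id"], sample["song_id"])
print(len(sample["audio_tokens"]))  # number of SemantiCodec tokens for this clip
```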

The following script was used for tokenization:
```python
from datasets import load_dataset, Dataset, DatasetDict
from semanticodec import SemantiCodec
from tqdm import tqdm
import soundfile as sf

model_id = "lewtun/music_genres"  # source audio dataset on the Hub
repo_name = ""                    # name of the tokenized dataset repository
user_name = ""                    # Hub user or organization to push to
token = ""                        # Hub write token
cache_dir = "cache"
vocab_size = 4096                 # SemantiCodec semantic codebook size

dataset = load_dataset(model_id, cache_dir=cache_dir, trust_remote_code=True)

# 100 tokens per second with a 4096-entry semantic vocabulary
semanticodec = SemantiCodec(token_rate=100, semantic_vocab_size=vocab_size)

dd = {}

for split in ["train", "test"]:
    tkns = []
    for idx in tqdm(range(len(dataset[split]))):
        sample = dataset[split][idx]["audio"]
        array = sample["array"]
        sr = sample["sampling_rate"]

        # SemantiCodec encodes from a file path, so write the clip to a temporary wav first
        sf.write("output.wav", array, sr)

        tokens = semanticodec.encode("output.wav").detach().cpu().numpy().flatten()
        tkns.append(tokens)

    ds = Dataset.from_dict({
        "audio_tokens": tkns,
        "genre_id": list(dataset[split]["genre_id"]),
        "genre": list(dataset[split]["genre"]),
        "song_id": list(dataset[split]["song_id"])
    })
    dd[split] = ds

dd = DatasetDict(dd)
dd.save_to_disk(repo_name)
dd.push_to_hub(f"{user_name}/{repo_name}", token=token)
```
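
For listening checks, the stored tokens can be decoded back to audio. The sketch below rests on assumptions: it supposes the flattened tokens came from SemantiCodec's usual `(1, frames, 2)` token layout and that `decode` returns 16 kHz audio shaped `(1, 1, num_samples)`; verify both against the installed SemantiCodec version (the repository id is again a placeholder).

```python
import numpy as np
import soundfile as sf
import torch
from datasets import load_dataset
from semanticodec import SemantiCodec

semanticodec = SemantiCodec(token_rate=100, semantic_vocab_size=4096)

# Placeholder repository id, as above.
ds = load_dataset("<user_name>/<repo_name>", split="test")

# Assumption: the flattened tokens were originally a (1, frames, 2) tensor from encode().
flat = np.asarray(ds[0]["audio_tokens"], dtype=np.int64)
tokens = torch.from_numpy(flat).reshape(1, -1, 2)

waveform = semanticodec.decode(tokens)                # assumed shape: (1, 1, num_samples)
sf.write("reconstructed.wav", waveform[0, 0], 16000)  # SemantiCodec operates at 16 kHz
```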