TristanBehrens committed · Commit cd54e13 · 1 Parent(s): 1e735e8
Upload lakhclean_gpt2_generation.ipynb

lakhclean_gpt2_generation.ipynb  ADDED  (+316 -0)
@@ -0,0 +1,316 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "DWLOSBkp0A2U"
   },
   "source": [
    "# GPT-2 for music - By Dr. Tristan Behrens\n",
    "\n",
    "This notebook shows you how to generate music with GPT-2.\n",
    "\n",
    "---\n",
    "\n",
    "## Find me online\n",
    "\n",
    "- https://www.linkedin.com/in/dr-tristan-behrens-734967a2/\n",
    "- https://twitter.com/DrTBehrens\n",
    "- https://github.com/AI-Guru\n",
    "- https://huggingface.co/TristanBehrens\n",
    "- https://huggingface.co/ai-guru\n",
    "\n",
    "---\n",
    "\n",
    "## Install dependencies.\n",
    "\n",
    "The following cell sets up fluidsynth and pyfluidsynth on Colaboratory."
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "if \"google.colab\" in str(get_ipython()):\n",
    "    print(\"Installing dependencies...\")\n",
    "    !apt-get update -qq && apt-get install -qq libfluidsynth2 build-essential libasound2-dev libjack-dev\n",
    "    !pip install -qU pyfluidsynth"
   ],
   "metadata": {
    "id": "k1a8sd2KZCz9"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "6J_AnhV8D5p6"
   },
   "outputs": [],
   "source": [
    "!pip install transformers\n",
    "!pip install note_seq"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "RzhHhFll0JVl"
   },
   "source": [
    "## Load the tokenizer and the model from 🤗 Hub."
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "import os\n",
    "os.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\""
   ],
   "metadata": {
    "id": "zGupj_vuZ9f2"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "g3ih12FMD7bs"
   },
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"ai-guru/lakhclean_mmmtrack_4bars_d-2048\")\n",
    "model = AutoModelForCausalLM.from_pretrained(\"ai-guru/lakhclean_mmmtrack_4bars_d-2048\")"
   ]
  },
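  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional: a quick sanity check. The tokenizer works on whitespace-separated music tokens such as `PIECE_START` and `TRACK_END`, which the cells below rely on; the round trip in the next cell is only a minimal sketch of that."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch: encode two of the music tokens used later and decode them back.\n",
    "# Assumes the tokenizer/model cell above has been run.\n",
    "example_ids = tokenizer.encode(\"PIECE_START TRACK_END\")\n",
    "print(example_ids)\n",
    "print(tokenizer.decode(example_ids))"
   ]
  },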
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "YfHXFugA0WdI"
   },
   "source": [
    "## Convert the generated tokens to music that you can listen to.\n",
    "\n",
    "This uses note_seq, Google Magenta's library for working with symbolic music as NoteSequence objects (closely related to MIDI). You can also use it to load and save MIDI files; a small export example follows the generation cell below. Check their repo if you want to learn more.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "L3QMj8NyEBqs"
   },
   "outputs": [],
   "source": [
    "import note_seq\n",
    "\n",
    "NOTE_LENGTH_16TH_120BPM = 0.25 * 60 / 120\n",
    "BAR_LENGTH_120BPM = 4.0 * 60 / 120\n",
    "\n",
    "def token_sequence_to_note_sequence(token_sequence, use_program=True, use_drums=True, instrument_mapper=None, only_piano=False):\n",
    "\n",
    "    if isinstance(token_sequence, str):\n",
    "        token_sequence = token_sequence.split()\n",
    "\n",
    "    note_sequence = empty_note_sequence()\n",
    "\n",
    "    # Render all notes.\n",
    "    current_program = 1\n",
    "    current_is_drum = False\n",
    "    current_instrument = 0\n",
    "    track_count = 0\n",
    "    for token_index, token in enumerate(token_sequence):\n",
    "\n",
    "        if token == \"PIECE_START\":\n",
    "            pass\n",
    "        elif token == \"PIECE_END\":\n",
    "            print(\"The end.\")\n",
    "            break\n",
    "        elif token == \"TRACK_START\":\n",
    "            current_bar_index = 0\n",
    "            track_count += 1\n",
    "            pass\n",
    "        elif token == \"TRACK_END\":\n",
    "            pass\n",
    "        elif token == \"KEYS_START\":\n",
    "            pass\n",
    "        elif token == \"KEYS_END\":\n",
    "            pass\n",
    "        elif token.startswith(\"KEY=\"):\n",
    "            pass\n",
    "        elif token.startswith(\"INST\"):\n",
    "            instrument = token.split(\"=\")[-1]\n",
    "            if instrument != \"DRUMS\" and use_program:\n",
    "                if instrument_mapper is not None:\n",
    "                    if instrument in instrument_mapper:\n",
    "                        instrument = instrument_mapper[instrument]\n",
    "                current_program = int(instrument)\n",
    "                current_instrument = track_count\n",
    "                current_is_drum = False\n",
    "            if instrument == \"DRUMS\" and use_drums:\n",
    "                current_instrument = 0\n",
    "                current_program = 0\n",
    "                current_is_drum = True\n",
    "        elif token == \"BAR_START\":\n",
    "            current_time = current_bar_index * BAR_LENGTH_120BPM\n",
    "            current_notes = {}\n",
    "        elif token == \"BAR_END\":\n",
    "            current_bar_index += 1\n",
    "            pass\n",
    "        elif token.startswith(\"NOTE_ON\"):\n",
    "            pitch = int(token.split(\"=\")[-1])\n",
    "            note = note_sequence.notes.add()\n",
    "            note.start_time = current_time\n",
    "            note.end_time = current_time + 4 * NOTE_LENGTH_16TH_120BPM\n",
    "            note.pitch = pitch\n",
    "            note.instrument = current_instrument\n",
    "            note.program = current_program\n",
    "            note.velocity = 80\n",
    "            note.is_drum = current_is_drum\n",
    "            current_notes[pitch] = note\n",
    "        elif token.startswith(\"NOTE_OFF\"):\n",
    "            pitch = int(token.split(\"=\")[-1])\n",
    "            if pitch in current_notes:\n",
    "                note = current_notes[pitch]\n",
    "                note.end_time = current_time\n",
    "        elif token.startswith(\"TIME_DELTA\"):\n",
    "            delta = float(token.split(\"=\")[-1]) * NOTE_LENGTH_16TH_120BPM\n",
    "            current_time += delta\n",
    "        elif token.startswith(\"DENSITY=\"):\n",
    "            pass\n",
    "        elif token == \"[PAD]\":\n",
    "            pass\n",
    "        else:\n",
    "            #print(f\"Ignored token {token}.\")\n",
    "            pass\n",
    "\n",
    "    # Make the instruments right.\n",
    "    instruments_drums = []\n",
    "    for note in note_sequence.notes:\n",
    "        pair = [note.program, note.is_drum]\n",
    "        if pair not in instruments_drums:\n",
    "            instruments_drums += [pair]\n",
    "        note.instrument = instruments_drums.index(pair)\n",
    "\n",
    "    if only_piano:\n",
    "        for note in note_sequence.notes:\n",
    "            if not note.is_drum:\n",
    "                note.instrument = 0\n",
    "                note.program = 0\n",
    "\n",
    "    return note_sequence\n",
    "\n",
    "def empty_note_sequence(qpm=120.0, total_time=0.0):\n",
    "    note_sequence = note_seq.protobuf.music_pb2.NoteSequence()\n",
    "    note_sequence.tempos.add().qpm = qpm\n",
    "    note_sequence.ticks_per_quarter = note_seq.constants.STANDARD_PPQ\n",
    "    note_sequence.total_time = total_time\n",
    "    return note_sequence"
   ]
  },
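  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To get a feel for the token format, the next cell is a minimal sketch: it runs the converter above on a tiny hand-written token sequence (one track, one bar, a single C4 note). The token names are the ones handled by the converter's branches; the example values are made up for illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch (assumes the converter cell above has been run):\n",
    "# a hand-written token string covering the main token types handled above.\n",
    "example_tokens = \"PIECE_START TRACK_START INST=0 BAR_START NOTE_ON=60 TIME_DELTA=4 NOTE_OFF=60 BAR_END TRACK_END\"\n",
    "example_note_sequence = token_sequence_to_note_sequence(example_tokens)\n",
    "print(example_note_sequence.notes)"
   ]
  },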
  {
   "cell_type": "markdown",
   "source": [
    "## Generate music\n",
    "\n",
    "This will generate one track of music and render it."
   ],
   "metadata": {
    "id": "4kr2dECziaFA"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "generated_sequence = \"PIECE_START\""
   ],
   "metadata": {
    "id": "cUg1DrlygzgT"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "Note: Run the following cell multiple times to generate more tracks."
   ],
   "metadata": {
    "id": "SinUPIHyimr5"
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ZYpukydNESDF"
   },
   "outputs": [],
   "source": [
    "# Encode the conditioning tokens.\n",
    "input_ids = tokenizer.encode(generated_sequence, return_tensors=\"pt\")\n",
    "#print(input_ids)\n",
    "\n",
    "# Generate more tokens.\n",
    "eos_token_id = tokenizer.encode(\"TRACK_END\")[0]\n",
    "temperature = 1.0\n",
    "generated_ids = model.generate(\n",
    "    input_ids,\n",
    "    max_length=2048,\n",
    "    do_sample=True,\n",
    "    temperature=temperature,\n",
    "    eos_token_id=eos_token_id,\n",
    ")\n",
    "generated_sequence = tokenizer.decode(generated_ids[0])\n",
    "print(generated_sequence)\n",
    "\n",
    "note_sequence = token_sequence_to_note_sequence(generated_sequence)\n",
    "\n",
    "synth = note_seq.fluidsynth\n",
    "note_seq.plot_sequence(note_sequence)\n",
    "note_seq.play_sequence(note_sequence, synth)"
   ]
  },
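  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As mentioned above, note_seq can also write MIDI files. The next cell is a small, optional sketch that saves the generated NoteSequence as a MIDI file; the file name `generated.mid` is just an example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: export the generated NoteSequence to MIDI with note_seq.\n",
    "# Assumes the generation cell above has been run; \"generated.mid\" is an example file name.\n",
    "note_seq.sequence_proto_to_midi_file(note_sequence, \"generated.mid\")"
   ]
  },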
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "d1x6HeF90kkO"
   },
   "source": [
    "# Thank you!"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "accelerator": "GPU",
  "gpuClass": "standard"
 },
 "nbformat": 4,
 "nbformat_minor": 0
}