---
license: mit
tags:
- audio-generation
---
[Dance Diffusion](https://github.com/Harmonai-org/sample-generator) is now available in 🧨 Diffusers.
## FP32
```python
# !pip install diffusers[torch] accelerate scipy
from diffusers import DiffusionPipeline
import scipy.io.wavfile

model_id = "harmonai/maestro-150k"
pipeline = DiffusionPipeline.from_pretrained(model_id)
pipeline = pipeline.to("cuda")

# Generate a batch of 4-second clips
audios = pipeline(audio_length_in_s=4.0).audios

# To save locally
for i, audio in enumerate(audios):
    scipy.io.wavfile.write(f"maestro_test_{i}.wav", pipeline.unet.config.sample_rate, audio.transpose())

# To display in Google Colab
import IPython.display as ipd

for audio in audios:
    display(ipd.Audio(audio, rate=pipeline.unet.config.sample_rate))
```
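
Beyond `audio_length_in_s`, the pipeline call exposes a few generation controls. The sketch below is an illustration assuming the standard `DanceDiffusionPipeline` call signature (`batch_size`, `num_inference_steps`, `generator`); adjust it to your installed Diffusers version.

```python
# Sketch: seeded, batched generation (assumes DanceDiffusionPipeline's
# __call__ accepts batch_size, num_inference_steps and generator).
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")

# Fixing the seed makes repeated runs return the same clips
generator = torch.Generator(device="cuda").manual_seed(0)

audios = pipeline(
    audio_length_in_s=4.0,
    batch_size=2,             # generate two clips in one call
    num_inference_steps=100,  # default number of denoising steps
    generator=generator,
).audios
```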
## FP16
Running the pipeline in half precision is faster, at a small cost in audio quality.
```python
# !pip install diffusers[torch] accelerate scipy
from diffusers import DiffusionPipeline
import scipy.io.wavfile
import torch

model_id = "harmonai/maestro-150k"
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")

# Generate a batch of 4-second clips
audios = pipeline(audio_length_in_s=4.0).audios

# To save locally
for i, audio in enumerate(audios):
    scipy.io.wavfile.write(f"maestro_test_{i}.wav", pipeline.unet.config.sample_rate, audio.transpose())

# To display in Google Colab
import IPython.display as ipd

for audio in audios:
    display(ipd.Audio(audio, rate=pipeline.unet.config.sample_rate))
```
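
The pipelines above return floating-point waveforms, which `scipy.io.wavfile.write` stores as 32-bit float WAV files; some players handle 16-bit PCM more reliably. A small optional conversion sketch, reusing `pipeline` and `audios` from the snippet above (the `int16` scaling is an illustration, not part of the pipeline):

```python
# Optional: convert the float waveform to 16-bit PCM before writing,
# which more audio players handle out of the box.
import numpy as np
import scipy.io.wavfile

sample_rate = pipeline.unet.config.sample_rate

for i, audio in enumerate(audios):
    pcm = np.clip(audio.transpose(), -1.0, 1.0)            # (samples, channels) in [-1, 1]
    pcm = (pcm * np.iinfo(np.int16).max).astype(np.int16)  # scale to int16 range
    scipy.io.wavfile.write(f"maestro_test_{i}_pcm16.wav", sample_rate, pcm)
```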