Text-to-Audio · Transformers · English · Inference Endpoints
hungchiayu committed (verified) · commit 59f625c · 1 parent: 8f6341f

Create handler.py

Files changed (1): handler.py (+128 / -0)
handler.py ADDED
from typing import Any, Dict, List

import base64
import io
import json
import logging

import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download

from models import AudioDiffusion, DDPMScheduler
from audioldm.audio.stft import TacotronSTFT
from audioldm.variational_autoencoder import AutoencoderKL

logger = logging.getLogger(__name__)
class Tango:
    def __init__(self, name="declare-lab/tango2", device="cuda:0" if torch.cuda.is_available() else "cpu"):

        path = snapshot_download(repo_id=name)

        vae_config = json.load(open("{}/vae_config.json".format(path)))
        stft_config = json.load(open("{}/stft_config.json".format(path)))
        main_config = json.load(open("{}/main_config.json".format(path)))

        self.vae = AutoencoderKL(**vae_config).to(device)
        self.stft = TacotronSTFT(**stft_config).to(device)
        self.model = AudioDiffusion(**main_config).to(device)

        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location=device)
        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location=device)
        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location=device)

        self.vae.load_state_dict(vae_weights)
        self.stft.load_state_dict(stft_weights)
        self.model.load_state_dict(main_weights)

        print("Successfully loaded checkpoint from:", name)

        self.vae.eval()
        self.stft.eval()
        self.model.eval()

        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder="scheduler")

    def chunks(self, lst, n):
        """Yield successive n-sized chunks from a list."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]

    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
        """Generate audio for a single prompt string."""
        with torch.no_grad():
            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
            mel = self.vae.decode_first_stage(latents)
            wave = self.vae.decode_to_waveform(mel)
        return wave[0]

    def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
        """Generate audio for a list of prompt strings."""
        outputs = []
        for k in tqdm(range(0, len(prompts), batch_size)):
            batch = prompts[k: k + batch_size]
            with torch.no_grad():
                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
                mel = self.vae.decode_first_stage(latents)
                wave = self.vae.decode_to_waveform(mel)
            outputs += [item for item in wave]
        if samples == 1:
            return outputs
        return list(self.chunks(outputs, samples))
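# Example (local use, not part of the endpoint flow): the Tango class above
# can be exercised directly. A minimal sketch; 16 kHz is assumed to be the
# sample rate of Tango's vocoder output.
#
#     tango = Tango()
#     wave = tango.generate("An audience cheering and clapping")
#     wavio.write("output.wav", wave, rate=16000, sampwidth=2)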
class EndpointHandler:
    def __init__(self, path=""):
        # Preload everything needed at inference time: download the Tango2
        # checkpoint and move the model to the GPU once, at endpoint startup.
        self.model = Tango(device="cuda" if torch.cuda.is_available() else "cpu")

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        data args:
            inputs (:obj:`str`): the text prompt describing the audio to generate
            parameters (:obj:`dict`, optional): generation settings
        Return:
            A :obj:`list` | `dict`: will be serialized and returned
        """
        logger.info(f"Received incoming request with {data=}")

        if "inputs" in data and isinstance(data["inputs"], str):
            prompt = data.pop("inputs")
        elif "prompt" in data and isinstance(data["prompt"], str):
            prompt = data.pop("prompt")
        else:
            raise ValueError(
                "Provided input body must contain either the key `inputs` or `prompt` with the"
                " prompt to use for the audio generation, and it needs to be a non-empty string."
            )

        parameters = data.pop("parameters", {})

        num_inference_steps = parameters.get("num_inference_steps", 100)
        guidance_scale = parameters.get("guidance_scale", 3.0)

        # Seed the RNG (a seed cannot be passed to the pipeline directly,
        # only via the global generator state).
        seed = parameters.get("seed", 0)
        torch.manual_seed(seed)

        wave = self.model.generate(prompt, steps=num_inference_steps, guidance=guidance_scale)

        # Serialize the 16 kHz waveform as a base64-encoded WAV file so the
        # response is JSON-safe. The response shape below is this handler's
        # own convention, not a fixed Inference Endpoints requirement.
        buffer = io.BytesIO()
        wavio.write(buffer, wave, rate=16000, sampwidth=2)
        audio_b64 = base64.b64encode(buffer.getvalue()).decode("utf-8")

        return [{"audio": audio_b64, "sample_rate": 16000}]
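For reference, a minimal client sketch for a deployed endpoint. The URL and token are placeholders for your own deployment, and the response shape follows the handler's return value above.

    import base64
    import requests

    # Placeholders: substitute your own endpoint URL and access token.
    API_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
    headers = {"Authorization": "Bearer <HF_TOKEN>", "Content-Type": "application/json"}

    payload = {
        "inputs": "A dog barking in the distance",
        "parameters": {"num_inference_steps": 100, "guidance_scale": 3.0, "seed": 0},
    }
    response = requests.post(API_URL, headers=headers, json=payload)

    # The handler returns [{"audio": <base64 WAV>, "sample_rate": 16000}].
    audio_b64 = response.json()[0]["audio"]
    with open("output.wav", "wb") as f:
        f.write(base64.b64decode(audio_b64))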