mrolando committed · Commit 0592ce5
Parent(s): 74138e0
test
app.py CHANGED
@@ -19,22 +19,19 @@ pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
 # pipe.unet = torch.compile(pipe.unet)
 #pipe.unet = torch.compile(pipe.unet)
-
-
-
 import base64
 
 with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
     encoded_image = base64.b64encode(image_file.read()).decode()
 
-CKPT = "facebook/nllb-200-distilled-600M"
+# CKPT = "facebook/nllb-200-distilled-600M"
 
-model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
-tokenizer = AutoTokenizer.from_pretrained(CKPT)
+# model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
+# tokenizer = AutoTokenizer.from_pretrained(CKPT)
 def generate_sound(text,steps,audio_length,negative_prompt):
     print(text)
-    text=translate_text(text)
-    negative_prompt = translate_text(negative_prompt)
+    # text=translate_text(text)
+    # negative_prompt = translate_text(negative_prompt)
     print(text)
     waveforms = pipe(text,
                      num_inference_steps=steps,
@@ -43,17 +40,17 @@ def generate_sound(text,steps,audio_length,negative_prompt):
     rate =16000
     return rate, waveforms[0]
 
-def translate_text(text):
-    translation_pipeline = pipeline("translation",
-                                    model=model,
-                                    tokenizer=tokenizer,
-                                    src_lang="spa_Latn",
-                                    tgt_lang="eng_Latn",
-                                    max_length=400,
-                                    device=device)
+# def translate_text(text):
+#     translation_pipeline = pipeline("translation",
+#                                     model=model,
+#                                     tokenizer=tokenizer,
+#                                     src_lang="spa_Latn",
+#                                     tgt_lang="eng_Latn",
+#                                     max_length=400,
+#                                     device=device)
 
-    result = translation_pipeline(text)
-    return result[0]['translation_text']
+#     result = translation_pipeline(text)
+#     return result[0]['translation_text']
 # def translate_text(text):
 #     text = es_en_translator(text)[0].get("translation_text")
 #     return text
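
For reference, the Spanish-to-English translation step that this commit comments out can be reproduced on its own with the transformers pipeline API. The sketch below is an illustration, not part of the Space's code: the checkpoint, the src_lang/tgt_lang codes, and max_length come from the diff, while the example sentence and the CPU-only setup are placeholders.

# Standalone sketch of the translation step disabled by this commit.
# CKPT, language codes, and max_length are taken from the diff;
# the input sentence is a hypothetical example.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

CKPT = "facebook/nllb-200-distilled-600M"
model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
tokenizer = AutoTokenizer.from_pretrained(CKPT)

translator = pipeline(
    "translation",
    model=model,
    tokenizer=tokenizer,
    src_lang="spa_Latn",
    tgt_lang="eng_Latn",
    max_length=400,
)

result = translator("un perro ladrando bajo la lluvia")  # "a dog barking in the rain"
print(result[0]["translation_text"])

With translate_text commented out, generate_sound passes the user's prompt and negative prompt to the AudioLDM pipe as-is, so prompts are no longer translated before audio generation.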