elizabetvaganova committed
Commit 6626f2f · 1 Parent(s): 7b53c86

Update app.py

Files changed (1)
  1. app.py +6 -61
app.py CHANGED
@@ -2,70 +2,15 @@ import gradio as gr
 import numpy as np
 import torch
 from datasets import load_dataset
-
 from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
 
-
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
-# load speech translation checkpoint
-asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
-
-# load text-to-speech checkpoint and speaker embeddings
-processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
-
-asr_pipe = pipeline("automatic-speech-recognition", model="new/asr/model", device=device)
-
-embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
-
-
-def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
-    return outputs["text"]
-
-
-def synthesise(text):
-    inputs = processor(text=text, return_tensors="pt")
-    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
-    return speech.cpu()
-
-
-def speech_to_speech_translation(audio):
-    translated_text = translate(audio)
-    synthesised_speech = synthesise(translated_text)
-    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
-    return 16000, synthesised_speech
-
-
-title = "Cascaded STST"
-description = """
-Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's
-[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech:
-
-![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
-"""
-
-demo = gr.Blocks()
-
-mic_translate = gr.Interface(
-    fn=speech_to_speech_translation,
-    inputs=gr.Audio(source="microphone", type="filepath"),
-    outputs=gr.Audio(label="Generated Speech", type="numpy"),
-    title=title,
-    description=description,
-)
-
-file_translate = gr.Interface(
-    fn=speech_to_speech_translation,
-    inputs=gr.Audio(source="upload", type="filepath"),
-    outputs=gr.Audio(label="Generated Speech", type="numpy"),
-    examples=[["./example.wav"]],
-    title=title,
-    description=description,
-)
 
-with demo:
-    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
 
-demo.launch()
+# Load a lightweight automatic speech recognition model (vosk)
+asr_pipe = pipeline("automatic-speech-recognition", model="alphacep/kaldi-ru", device=device)
+
+# Load a lightweight text-to-speech checkpoint and speaker embeddings
+processor = SpeechT5Processor.from_pretrained("ttskit/ttskit-tts-ljspeech")
+model = SpeechT5ForTextToSpeech.from_pretrained("ttskit/ttskit-tts-ljspeech").to(device)
+vocoder = SpeechT5HifiGan.from_pretrained("ljspeech/vocoder-cry
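Note: the committed file ends mid-line above; the vocoder checkpoint name is cut off in the commit and is left as-is. For orientation only, below is a minimal sketch of how the rest of the file might be wired up, reusing the translate/synthesise/Gradio plumbing that this commit deletes. The `microsoft/speecht5_hifigan` vocoder is an assumption standing in for the truncated name, and the committed checkpoints (`alphacep/kaldi-ru`, `ttskit/ttskit-tts-ljspeech`) are kept verbatim, with no claim that they actually load through these Whisper/SpeechT5-style classes.

```python
# Hypothetical completion of the truncated app.py -- a sketch, not the committed code.
import gradio as gr
import numpy as np
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Model choices as committed; whether they are pipeline/SpeechT5-compatible is untested here.
asr_pipe = pipeline("automatic-speech-recognition", model="alphacep/kaldi-ru", device=device)
processor = SpeechT5Processor.from_pretrained("ttskit/ttskit-tts-ljspeech")
model = SpeechT5ForTextToSpeech.from_pretrained("ttskit/ttskit-tts-ljspeech").to(device)

# ASSUMPTION: the vocoder line is truncated in the commit; the standard SpeechT5
# vocoder is substituted so the sketch runs end to end.
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# Speaker embedding reused from the version of the file this commit removes.
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)


def translate(audio):
    # Whisper-style pipelines accept generate_kwargs={"task": "translate"};
    # other ASR checkpoints may ignore or reject it.
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
    return outputs["text"]


def synthesise(text):
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(
        inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder
    )
    return speech.cpu()


def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    # Scale float audio in [-1.0, 1.0] to 16-bit PCM.
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech


demo = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
)
demo.launch()
```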
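The `(16000, synthesised_speech)` return value follows Gradio's numpy-audio convention: a `(sample_rate, np.ndarray)` tuple, with float waveforms scaled by 32767 into the full int16 PCM range. 16 kHz matches the output rate of SpeechT5's vocoder in the deleted version of this app.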