elizabetvaganova committed
Commit 555cb72 · 1 Parent(s): ff760f8

Update app.py

Files changed (1): app.py (+9 -19)
app.py CHANGED
@@ -1,23 +1,13 @@
 import gradio as gr
 import numpy as np
 import torch
-from vosk import KaldiRecognizer, Model
+import speech_recognition as sr
 from datasets import load_dataset
 from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
-pip install vosk
-
+pip install SpeechRecognition
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
-# Load Vosk automatic speech recognition model
-vosk_model = Model("elizabetvaganova/speech-to-speech-translation-vaganova")
-
-def recognize_speech(audio):
-    recognizer = KaldiRecognizer(vosk_model, 16000)
-    recognizer.AcceptWaveform(audio.data)
-    result = recognizer.FinalResult()
-    return result["text"]
-
 # Load a lightweight text-to-speech checkpoint and speaker embeddings
 processor = SpeechT5Processor.from_pretrained("ttskit/ttskit-tts-ljspeech")
 
@@ -27,11 +17,11 @@ vocoder = SpeechT5HifiGan.from_pretrained("ljspeech/vocoder-cryptron").to(device
 embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
 speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
 
-def translate(audio):
-    recognizer = KaldiRecognizer(vosk_model, 16000)
-    recognizer.AcceptWaveform(audio.data)
-    result = recognizer.FinalResult()
-    return result["text"]
+def recognize_speech(audio):
+    recognizer = sr.Recognizer()
+    with sr.AudioFile(audio) as source:
+        audio_data = recognizer.record(source)
+    return recognizer.recognize_google(audio_data)
 
 def synthesise(text):
     inputs = processor(text=text, return_tensors="pt")
@@ -39,14 +29,14 @@ def synthesise(text):
     return speech.cpu()
 
 def speech_to_speech_translation(audio):
-    translated_text = translate(audio)
+    translated_text = recognize_speech(audio)
     synthesised_speech = synthesise(translated_text)
     synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
     return 16000, synthesised_speech
 
 title = "Cascaded STST"
 description = """
-Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses Vosk for automatic speech recognition, and lightweight text-to-speech and vocoder models.
+Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses Google Web Speech API for automatic speech recognition, and lightweight text-to-speech and vocoder models.
 """
 
 demo = gr.Blocks()
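Review note: the committed pip install SpeechRecognition statement at the top of app.py (like the pip install vosk line it replaces) is a shell command, not Python, and raises a SyntaxError as soon as the Space imports the module. A minimal sketch of the corrected module header, assuming the usual Hugging Face Spaces convention of declaring the dependency in a requirements.txt next to app.py instead:

# app.py -- imports only; "SpeechRecognition" would be listed in the
# Space's requirements.txt rather than installed from inside the module
import gradio as gr
import numpy as np
import torch
import speech_recognition as sr
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor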
 
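Review note: speech_recognition's AudioFile expects a path (or file-like object) to a WAV/AIFF/FLAC file, while Gradio's Audio component passes a (sample_rate, numpy_array) tuple by default, so the new recognize_speech only works if the component is configured to hand over a file path. A hedged sketch of that wiring, with the error handling recognize_google typically needs since it is a network call to Google's free Web Speech API; the interface code here is an assumption, not what the Space currently contains:

def recognize_speech(audio_path):
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_path) as source:  # needs a WAV/AIFF/FLAC path
        audio_data = recognizer.record(source)
    try:
        # Network call to Google's free Web Speech API; defaults to US English
        return recognizer.recognize_google(audio_data)
    except sr.UnknownValueError:
        return ""  # speech was unintelligible
    # sr.RequestError propagates if the API is unreachable

demo = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(type="filepath"),  # a file path, not the default numpy tuple
    outputs=gr.Audio(),
)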
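Review note: for quick local verification of the cascade (recognize -> synthesise -> 16 kHz int16 waveform) outside Gradio, something like the following works, assuming scipy is available; the file names are hypothetical:

from scipy.io import wavfile

# Feed a recorded utterance through ASR + TTS and write the result to disk
rate, waveform = speech_to_speech_translation("utterance.wav")
wavfile.write("translated.wav", rate, waveform)  # 16 kHz, int16 samples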