elizabetvaganova committed
Commit db98dd8 · 1 Parent(s): 29f42c1

Update app.py

Files changed (1)
  1. app.py +12 -17
app.py CHANGED
@@ -1,19 +1,16 @@
-import subprocess
-
-# Install dependencies inside the Space
-subprocess.run(["pip", "install", "vosk"])
-subprocess.run(["pip", "install", "SpeechRecognition"])
-
 import gradio as gr
 import numpy as np
 import torch
-import speech_recognition as sr
 from datasets import load_dataset
-from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
+
+from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
-# Load a lightweight text-to-speech checkpoint and speaker embeddings
+# load the speech recognition checkpoint
+asr_pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=device)
+
+# load the text-to-speech checkpoint and speaker embeddings
 processor = SpeechT5Processor.from_pretrained("ttskit/ttskit-tts-ljspeech")
 
 model = SpeechT5ForTextToSpeech.from_pretrained("ttskit/ttskit-tts-ljspeech").to(device)
@@ -22,11 +19,9 @@ vocoder = SpeechT5HifiGan.from_pretrained("ljspeech/vocoder-cryptron").to(device
 embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
 speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
 
-def recognize_speech(audio):
-    recognizer = sr.Recognizer()
-    with sr.AudioFile(audio) as source:
-        audio_data = recognizer.record(source)
-    return recognizer.recognize_google(audio_data)
+def translate(audio):
+    outputs = asr_pipe(audio)
+    return outputs["text"]
 
 def synthesise(text):
     inputs = processor(text=text, return_tensors="pt")
@@ -34,14 +29,14 @@ def synthesise(text):
     return speech.cpu()
 
 def speech_to_speech_translation(audio):
-    translated_text = recognize_speech(audio)
+    translated_text = translate(audio)
     synthesised_speech = synthesise(translated_text)
     synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
     return 16000, synthesised_speech
 
 title = "Cascaded STST"
 description = """
-Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses Google Web Speech API for automatic speech recognition, and lightweight text-to-speech and vocoder models.
+Demo for cascaded speech-to-speech translation (STST). This revision uses Facebook's [Wav2Vec 2.0](https://huggingface.co/facebook/wav2vec2-base-960h) model for English speech recognition, a lightweight text-to-speech model ([ttskit/ttskit-tts-ljspeech](https://huggingface.co/ttskit/ttskit-tts-ljspeech)), and a lightweight vocoder ([ljspeech/vocoder-cryptron](https://huggingface.co/ljspeech/vocoder-cryptron)).
 """
 
 demo = gr.Blocks()
@@ -66,4 +61,4 @@ file_translate = gr.Interface(
 with demo:
     gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
 
-demo.launch()
+demo.launch()
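The `generate_kwargs={"task": "translate"}` idiom belongs to seq2seq checkpoints such as Whisper; facebook/wav2vec2-base-960h is a CTC model that performs no generation step and transcribes English only, which is why the pipeline here is called without generation arguments. For a cascade that genuinely maps any source language to English, the ASR stage would need a Whisper-style checkpoint. A minimal sketch, assuming openai/whisper-base (an assumption, not the model used in this commit):

```python
# Sketch: an ASR stage that translates any supported source language to English.
# Assumes openai/whisper-base (hypothetical here; this commit uses wav2vec2).
import torch
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)

def translate(audio):
    # "task": "translate" is a Whisper generation flag: decode into English text
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
    return outputs["text"]
```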
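The body of `synthesise` sits in unchanged context, so the diff shows only its first and last lines. A sketch of how this function is typically written with SpeechT5, assuming the standard microsoft/speecht5_tts and microsoft/speecht5_hifigan checkpoints (the ttskit/ljspeech checkpoints pinned in this Space may differ):

```python
# Sketch of a SpeechT5 text-to-speech path, assuming the standard
# microsoft/speecht5_tts and microsoft/speecht5_hifigan checkpoints.
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# 512-dim x-vector speaker embedding, selected as in the app
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

def synthesise(text):
    inputs = processor(text=text, return_tensors="pt")
    # generate_speech returns a 1-D float waveform sampled at 16 kHz
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    return speech.cpu()
```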
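`speech_to_speech_translation` returns a `(sample_rate, int16 array)` tuple, which Gradio's `Audio` output component accepts directly; the `* 32767` scaling converts the model's float waveform to 16-bit PCM. The `mic_translate` and `file_translate` definitions fall outside the changed hunks; a plausible wiring in Gradio 3.x terms (the names and `Audio` arguments below are assumptions, not shown in this diff):

```python
import gradio as gr

# Hypothetical reconstruction of the unchanged Interface definitions (Gradio 3.x API)
mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)
```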