python -m pip install SpeechRecognition gradio torch datasets transformers sentencepiece

import gradio as gr
import numpy as np
import torch
import speech_recognition as sr
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# SpeechT5 text-to-speech checkpoint and its matching HiFi-GAN vocoder
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# Speaker embedding (x-vector) used to condition SpeechT5 on a target voice
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
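
Each entry in this dataset is a 512-dimensional x-vector, and unsqueeze(0) adds the batch dimension that generate_speech expects:

print(speaker_embeddings.shape)  # torch.Size([1, 512])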

def recognize_speech(audio):
    # Transcribe the audio file with the Google Web Speech API
    # (requires an internet connection; uses the library's default API key)
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio) as source:
        audio_data = recognizer.record(source)
    return recognizer.recognize_google(audio_data)
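
As a quick sanity check, the recognizer can be run on its own against a local WAV file (./example.wav is the sample file the demo below also references):

print(recognize_speech("./example.wav"))  # prints the transcribed text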

def synthesise(text):
    # Generate a 16 kHz waveform from text, conditioned on the speaker embedding
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(
        inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder
    )
    return speech.cpu()
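
To audition the TTS output outside Gradio, the waveform can be written to disk. A minimal sketch, assuming the soundfile package is installed (the output file name is arbitrary); SpeechT5 with HiFi-GAN produces audio at 16 kHz:

import soundfile as sf

speech = synthesise("Hello, this is a test.")
sf.write("tts_output.wav", speech.numpy(), samplerate=16000)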

def speech_to_speech_translation(audio):
    transcribed_text = recognize_speech(audio)
    synthesised_speech = synthesise(transcribed_text)
    # Scale the float waveform in [-1, 1] to 16-bit PCM for Gradio's numpy audio output
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech
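
The full cascade can be exercised end to end before wiring up the UI:

sample_rate, audio_array = speech_to_speech_translation("./example.wav")
print(sample_rate, audio_array.dtype, audio_array.shape)  # 16000 int16 (num_samples,)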

title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST): source speech is transcribed with the Google Web Speech API, then re-synthesised as English speech with a lightweight SpeechT5 text-to-speech model and HiFi-GAN vocoder.
"""

demo = gr.Blocks()

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    # Gradio 3.x syntax; on Gradio 4+, use sources=["microphone"] instead
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch()
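
When running on a remote machine or in a hosted notebook, launch can instead serve a temporary public URL:

demo.launch(share=True)  # creates a shareable gradio.live link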