# Real-time speech-to-text demo using Wav2Vec 2.0 (facebook/wav2vec2-base-960h)
# served through a streaming Gradio interface.
# Importing all the necessary packages
import nltk
import torch
import gradio as gr
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import numpy as np
# Downloading the necessary NLTK data
nltk.download("punkt")  # Punkt sentence-tokenizer models used by nltk.sent_tokenize
# Loading the pre-trained model and the processor
model_name = "facebook/wav2vec2-base-960h"  # base Wav2Vec 2.0 checkpoint (name suggests 960h LibriSpeech fine-tune)
processor = Wav2Vec2Processor.from_pretrained(model_name)  # feature extractor + CTC tokenizer pair
model = Wav2Vec2ForCTC.from_pretrained(model_name)  # acoustic model with CTC head
def correct_casing(input_sentence):
    """Capitalize the first letter of each sentence in *input_sentence*.

    Splits the text into sentences with NLTK's Punkt tokenizer, uppercases
    the first character of each sentence, and rejoins them with spaces.

    Args:
        input_sentence: Text to re-case (typically an all-lowercase ASR
            transcript).

    Returns:
        The text with each sentence's first character uppercased.
    """
    sentences = nltk.sent_tokenize(input_sentence)
    # s[:1] (not s[0]) is safe for empty strings; the original
    # s.replace(s[0], s[0].capitalize(), 1) raised IndexError on "".
    return ' '.join(s[:1].upper() + s[1:] for s in sentences)
def asr_transcript(audio):
    """Transcribe a mono waveform to sentence-cased text.

    Args:
        audio: 1-D float waveform; processed as 16 kHz audio (the sampling
            rate passed to the processor below).

    Returns:
        The transcription string with sentence-initial capitalization.
    """
    # Feature-extract the raw waveform into a batched PyTorch tensor.
    input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
    # Inference only: no_grad avoids building the autograd graph,
    # reducing memory use and latency on every streamed chunk.
    with torch.no_grad():
        logits = model(input_values).logits
    # Greedy CTC decoding: most likely token at each time frame.
    predicted_ids = torch.argmax(logits, dim=-1)
    # Collapse repeats/blanks and map token IDs back to text.
    transcription = processor.decode(predicted_ids[0])
    # Model output is uppercase; lowercase it, then restore sentence casing.
    transcription = correct_casing(transcription.lower())
    return transcription
def real_time_asr(audio, state=""):
    """Gradio streaming callback: transcribe one audio chunk and append it.

    Args:
        audio: Either a raw waveform array, or — as delivered by
            ``gr.Audio(streaming=True)`` — a ``(sample_rate, data)`` tuple.
        state: Accumulated transcript carried over from previous chunks.

    Returns:
        ``(display_text, new_state)`` feeding the Textbox and State outputs.
    """
    # gr.Audio with streaming=True yields (sample_rate, ndarray) tuples;
    # calling np.array() on that tuple does not produce a usable waveform.
    if isinstance(audio, tuple):
        # NOTE(review): assumes the streamed rate matches the 16 kHz the
        # model expects — confirm, and resample if the browser differs.
        _, audio = audio
    audio = np.asarray(audio, dtype=np.float32)
    transcription = asr_transcript(audio)
    # strip() avoids a leading space on the very first chunk.
    state = (state + " " + transcription).strip()
    return state, state
# Create the Gradio interface
iface = gr.Interface(
    fn=real_time_asr,  # streaming callback: (audio chunk, state) -> (text, state)
    inputs=[gr.Audio(streaming=True), gr.State()],  # mic stream + hidden transcript accumulator
    outputs=[gr.Textbox(), gr.State()],  # visible transcript + carried-over accumulator
    live=True,  # re-run the callback as new audio chunks arrive
    title="Real-Time ASR using Wav2Vec 2.0",
    description="This application displays transcribed text in real-time for given audio input"
)
# Launch the interface
iface.launch()