File size: 1,956 Bytes
28cd99e
7bc4184
 
 
28cd99e
 
 
 
7bc4184
 
28cd99e
7bc4184
28cd99e
7bc4184
 
 
28cd99e
 
 
 
096dea5
 
 
11acde6
ce76330
 
 
 
 
11acde6
28cd99e
 
 
 
 
 
 
 
 
 
 
 
1998cc5
096dea5
9e8f0e1
 
096dea5
 
 
 
 
28cd99e
 
 
 
9e8f0e1
1998cc5
28cd99e
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# Importing all the necessary packages
import nltk
import torch
import gradio as gr
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import numpy as np

# Downloading the necessary NLTK data
# "punkt" supplies the sentence tokenizer used by correct_casing() below.
nltk.download("punkt")

# Loading the pre-trained model and the processor
# NOTE(review): downstream code passes sampling_rate=16000 to the processor,
# so the rest of this file assumes 16 kHz mono audio for this checkpoint.
model_name = "facebook/wav2vec2-base-960h"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

def correct_casing(input_sentence):
    """Capitalize the first character of each sentence in *input_sentence*.

    Parameters
    ----------
    input_sentence : str
        Text to re-case (typically an all-lowercase ASR transcript).

    Returns
    -------
    str
        The sentences re-joined with single spaces, each beginning with an
        uppercase character. Empty input yields an empty string.
    """
    sentences = nltk.sent_tokenize(input_sentence)
    # s[:1] instead of s[0]: avoids IndexError on an empty sentence string.
    # Plain upper()+slice replaces the original str.replace(s[0], ..., 1)
    # trick, which did the same thing less directly.
    return ' '.join(s[:1].upper() + s[1:] for s in sentences)

def asr_transcript(audio):
    """Transcribe one chunk of audio with the module-level Wav2Vec 2.0 model.

    Parameters
    ----------
    audio : None | tuple[int, np.ndarray] | list | np.ndarray
        Raw waveform samples. Gradio's numpy audio components deliver a
        ``(sample_rate, samples)`` tuple; a bare array/list of samples is
        also accepted. Samples are assumed to be at 16 kHz — no resampling
        is performed here.

    Returns
    -------
    str
        Transcription with sentence-initial capitalization, or "" when the
        audio is missing or empty.
    """
    if audio is None:
        return ""

    # Gradio (type="numpy") delivers audio as (sample_rate, samples); unwrap
    # so we don't feed the tuple itself into the feature extractor.
    if isinstance(audio, tuple) and len(audio) == 2:
        _sample_rate, audio = audio  # NOTE(review): model expects 16 kHz; confirm source rate

    # Ensure audio is a numpy array
    if isinstance(audio, list):
        audio = np.array(audio)

    if len(audio) == 0:
        return ""

    # Collapse multi-channel / extra dimensions to a 1-D waveform.
    if audio.ndim > 1:
        audio = audio.flatten()

    # Integer PCM (e.g. int16 from a microphone) must be scaled to [-1, 1]
    # floats before feature extraction.
    if np.issubdtype(audio.dtype, np.integer):
        peak = np.iinfo(audio.dtype).max
        audio = audio.astype(np.float32) / peak

    # Turn the waveform into normalized model inputs.
    input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        logits = model(input_values).logits
    # Greedy CTC decoding: most likely token id at each frame.
    predicted_ids = torch.argmax(logits, dim=-1)
    # Decode the IDs to text
    transcription = processor.decode(predicted_ids[0])
    # Lowercase, then restore sentence-initial capitals.
    return correct_casing(transcription.lower())

def real_time_asr(audio, state=""):
    """Streaming callback: transcribe one audio chunk and append it to state.

    Parameters
    ----------
    audio : object
        The incoming audio chunk; some Gradio versions wrap it in a dict
        carrying the samples under an ``'array'`` key.
    state : str
        Transcript accumulated across previous callbacks.

    Returns
    -------
    tuple[str, str]
        ``(display_text, new_state)``. On any failure, the error message is
        shown and the previously accumulated state is preserved.
    """
    try:
        # Unwrap the dict form Gradio sometimes uses for streamed audio.
        payload = audio['array'] if isinstance(audio, dict) and 'array' in audio else audio
        updated = state + " " + asr_transcript(payload)
        return updated, updated
    except Exception as err:  # best-effort UI: surface the error, keep state intact
        return str(err), state

# Create the Gradio interface
# fn receives (audio_chunk, state) and returns (display_text, new_state);
# the gr.State input/output pair is how Gradio threads the accumulated
# transcript between streaming callbacks. live=True re-runs fn as new
# audio chunks arrive from the streaming Audio input.
iface = gr.Interface(
    fn=real_time_asr,
    inputs=[gr.Audio(streaming=True), gr.State()],
    outputs=[gr.Textbox(), gr.State()],
    live=True,
    title="Real-Time ASR using Wav2Vec 2.0",
    description="This application displays transcribed text in real-time for given audio input"
)

# Launch the interface
# Blocks here serving the web UI until the process is stopped.
iface.launch()