Spaces:
Sleeping
Sleeping
techysanoj
committed on
Commit
·
4d14671
1
Parent(s):
6ab3f9b
Upload 3 files
Browse files
README.md
CHANGED
@@ -1,13 +1,11 @@
|
|
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
sdk: gradio
|
7 |
sdk_version: 3.35.2
|
8 |
-
app_file:
|
9 |
pinned: false
|
10 |
-
license: openrail
|
11 |
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
+
|
2 |
---
|
3 |
+
title: automatic-speech-recognition
|
4 |
+
emoji: 🔥
|
5 |
+
colorFrom: indigo
|
6 |
+
colorTo: indigo
|
7 |
sdk: gradio
|
8 |
sdk_version: 3.35.2
|
9 |
+
app_file: run.py
|
10 |
pinned: false
|
|
|
11 |
---
|
|
|
|
app.py
CHANGED
@@ -1,37 +1,17 @@
|
|
1 |
-
import torch
|
2 |
-
import torchaudio
|
3 |
-
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
|
4 |
import gradio as gr
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
# Get predicted transcription
|
22 |
-
predicted_ids = torch.argmax(logits, dim=-1)
|
23 |
-
transcription = tokenizer.batch_decode(predicted_ids)[0]
|
24 |
-
|
25 |
-
return transcription
|
26 |
-
|
27 |
-
# Define Gradio interface
|
28 |
-
def speech_recognition(audio_file):
|
29 |
-
transcription = transcribe_speech(audio_file)
|
30 |
-
return transcription
|
31 |
-
|
32 |
-
inputs = gr.inputs.Audio(type="file", label="Upload Audio File")
|
33 |
-
outputs = gr.outputs.Textbox(label="Transcription")
|
34 |
-
interface = gr.Interface(fn=speech_recognition, inputs=inputs, outputs=outputs)
|
35 |
-
|
36 |
-
# Run the Gradio interface
|
37 |
-
interface.launch()
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
+
import os
|
3 |
+
|
4 |
+
# save your HF API token from https://hf.co/settings/tokens as an env variable to avoid rate limiting
|
5 |
+
auth_token = os.getenv("auth_token")
|
6 |
+
|
7 |
+
# automatically load the interface from a HF model
|
8 |
+
# you can remove the api_key parameter if you don't care about rate limiting.
|
9 |
+
demo = gr.Interface.load(
|
10 |
+
"huggingface/facebook/wav2vec2-base-960h",
|
11 |
+
title="Speech-to-text",
|
12 |
+
inputs="mic",
|
13 |
+
description="Let me try to guess what you're saying!",
|
14 |
+
api_key=auth_token
|
15 |
+
)
|
16 |
+
|
17 |
+
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
run.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import os
|
3 |
+
|
4 |
+
# save your HF API token from https://hf.co/settings/tokens as an env variable to avoid rate limiting
|
5 |
+
auth_token = os.getenv("auth_token")
|
6 |
+
|
7 |
+
# automatically load the interface from a HF model
|
8 |
+
# you can remove the api_key parameter if you don't care about rate limiting.
|
9 |
+
demo = gr.load(
|
10 |
+
"huggingface/facebook/wav2vec2-base-960h",
|
11 |
+
title="Speech-to-text",
|
12 |
+
inputs="mic",
|
13 |
+
description="Let me try to guess what you're saying!",
|
14 |
+
hf_token=auth_token
|
15 |
+
)
|
16 |
+
|
17 |
+
demo.launch()
|