sanchit-gandhi committed on
Commit
b191b97
·
1 Parent(s): 87505e7

add example

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers.models.whisper.tokenization_whisper import TO_LANGUAGE_CODE
6
  title = "Whisper JAX: The Fastest Whisper API ⚡️"
7
 
8
  description = "Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over **12x** faster, making it the fastest Whisper API available."
9
- #description += "\nYou can submit requests to Whisper JAX through this Gradio Demo, or directly through API calls (see below). This notebook demonstrates how you can run the Whisper JAX model yourself on a TPU v2-8 in a Google Colab: TODO."
10
 
11
  API_URL = "https://whisper-jax.ngrok.io/generate/"
12
 
@@ -112,6 +112,7 @@ youtube = gr.Interface(
112
  ],
113
  allow_flagging="never",
114
  title=title,
 
115
  description=description,
116
  article=article,
117
  )
 
6
  title = "Whisper JAX: The Fastest Whisper API ⚡️"
7
 
8
  description = "Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over **12x** faster, making it the fastest Whisper API available."
9
+ # description += "\nYou can submit requests to Whisper JAX through this Gradio Demo, or directly through API calls (see below). This notebook demonstrates how you can run the Whisper JAX model yourself on a TPU v2-8 in a Google Colab: TODO."
10
 
11
  API_URL = "https://whisper-jax.ngrok.io/generate/"
12
 
 
112
  ],
113
  allow_flagging="never",
114
  title=title,
115
+ examples=[["https://www.youtube.com/watch?v=m8u-18Q0s7I", "transcribe", False]],
116
  description=description,
117
  article=article,
118
  )