Update functions.py
functions.py  +7 -6
@@ -3,7 +3,7 @@ import os
 import random
 import openai
 import yt_dlp
-from pytube import YouTube
+from pytube import YouTube, extract
 import pandas as pd
 import plotly_express as px
 import nltk
@@ -225,10 +225,11 @@ def inference(link, upload, _asr_model):
     chunks = song[::twenty_minutes]

     transcriptions = []
-
+
+    video_id = extract.video_id(link)
     for i, chunk in enumerate(chunks):
-        chunk.export(f'output/chunk_{i}_{
-        transcriptions.append(load_whisper_api(f'output/chunk_{i}_{
+        chunk.export(f'output/chunk_{i}_{video_id}.mp4', format='mp4')
+        transcriptions.append(load_whisper_api(f'output/chunk_{i}_{video_id}.mp4')['text'])

     results = ','.join(transcriptions)

@@ -263,10 +264,10 @@ def inference(link, upload, _asr_model):
     transcriptions = []

     st.info("`Transcribing uploaded audio...`")
-
+
     for i, chunk in enumerate(chunks):
         chunk.export(f'output/chunk_{i}.mp4', format='mp4')
-        transcriptions.append(load_whisper_api('output/chunk_{i}.mp4')['text'])
+        transcriptions.append(load_whisper_api(f'output/chunk_{i}.mp4')['text'])

     results = ','.join(transcriptions)
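For reference, a minimal sketch of the YouTube-branch transcription loop after this change. The wrapper function and its parameters are illustrative, not part of functions.py; `load_whisper_api`, `song`, and `twenty_minutes` are assumed to behave as they do in the surrounding code. The point of the change: chunk filenames now embed the video ID returned by pytube's `extract.video_id`, so chunks from different videos no longer overwrite each other, and the uploaded-audio branch gains the missing f-string prefix so `{i}` is actually interpolated into the filename.

    import os
    from pytube import extract

    def transcribe_youtube_chunks(link, song, twenty_minutes, load_whisper_api):
        # Illustrative wrapper around the updated loop inside inference().
        os.makedirs('output', exist_ok=True)

        # pytube.extract.video_id pulls the video ID out of the YouTube URL,
        # giving each chunk file a name that is unique per video.
        video_id = extract.video_id(link)

        # Slicing a pydub AudioSegment with a step yields consecutive
        # chunks of twenty_minutes length (in milliseconds).
        chunks = song[::twenty_minutes]

        transcriptions = []
        for i, chunk in enumerate(chunks):
            path = f'output/chunk_{i}_{video_id}.mp4'
            chunk.export(path, format='mp4')
            transcriptions.append(load_whisper_api(path)['text'])

        return ','.join(transcriptions)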