ABIDFAYAZ committed
Commit ed9373e · verified · 1 Parent(s): ccf747d

Update app.py

Files changed (1)
  1. app.py +16 -17
app.py CHANGED
@@ -1,33 +1,32 @@
  import gradio as gr
  from transformers import pipeline
- import yagmail
+ import numpy as np

  # Load the pre-trained models for transcription and summarization
  asr_model = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-english")
  summarization_model = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")

- # Function to transcribe, summarize, and send email
- def transcribe_summarize_email(audio, email):
+ # Function to transcribe and summarize
+ def transcribe_and_summarize(audio):
+     if audio is None:
+         return "Error: No audio file provided.", None
+
+     # Check if the audio is from a mic recording (tuple) or file (ndarray)
+     if isinstance(audio, tuple):  # Mic recordings are returned as (sample_rate, data)
+         audio = np.array(audio[1], dtype=np.float32)
+
      transcription = asr_model(audio)["text"]
      summary = summarization_model(transcription, max_length=130, min_length=30, do_sample=False)[0]["summary_text"]

-     # Send email with transcription and summary
-     yag = yagmail.SMTP('[email protected]', 'jatc hwka ejhq awhi')
-     contents = [
-         f"Transcription:\n{transcription}",
-         f"\nSummary:\n{summary}"
-     ]
-     yag.send(to=email, subject="Meeting Transcription and Summary", contents=contents)
-
-     return transcription, summary, f"Email sent to {email}"
+     return transcription, summary

  # Create a Gradio interface
  interface = gr.Interface(
-     fn=transcribe_summarize_email,
-     inputs=[gr.Audio(type="filepath"), gr.Textbox(label="Recipient Email")],
-     outputs=["text", "text", "text"],
-     title="Meeting Transcription, Summarization, and Email",
-     description="Upload an audio file to get a transcription, summary, and send them via email."
+     fn=transcribe_and_summarize,
+     inputs=gr.Audio(type="filepath"),
+     outputs=["text", "text"],
+     title="Meeting Transcription and Summarization",
+     description="Upload an audio file or record using the mic to get a transcription and summary."
  )

  # Launch the interface
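
One subtlety in the new mic-handling branch is worth noting: with inputs=gr.Audio(type="filepath"), Gradio delivers both uploads and mic recordings as a file path string, so the isinstance(audio, tuple) branch only comes into play if the input type is switched to "numpy". In that case, the transformers ASR pipeline treats a bare array as already being at the model's expected sampling rate (16 kHz for this wav2vec2 checkpoint), so preserving the recorded rate is safer. Below is a minimal sketch of that variant, assuming app.py's asr_model is in scope; prepare_mic_audio is a hypothetical helper, not part of this commit:

    import numpy as np

    def prepare_mic_audio(audio):
        # Hypothetical helper: Gradio's numpy mic input arrives as (sample_rate, data).
        sample_rate, data = audio
        data = np.asarray(data)
        if np.issubdtype(data.dtype, np.integer):
            # Integer PCM from the browser -> floats in [-1, 1]
            data = data.astype(np.float32) / np.iinfo(data.dtype).max
        else:
            data = data.astype(np.float32)
        if data.ndim > 1:
            # Stereo recording: average the channels down to mono
            data = data.mean(axis=1)
        # The ASR pipeline accepts this dict form and, in recent transformers
        # versions, resamples it to the model's expected rate.
        return {"sampling_rate": sample_rate, "raw": data}

    # Usage sketch: transcription = asr_model(prepare_mic_audio(mic_tuple))["text"]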