Spaces: Runtime error
krishnasai99 committed · Commit 27999b6 · 1 Parent(s): 49d2a49
Update app.py

app.py CHANGED
@@ -5,11 +5,22 @@ from transformers import HubertForCTC, Wav2Vec2Processor , pipeline , Wav2Vec2Fo
 import torch
 import spacy
 from spacy import displacy
+import en_core_web_sm
+import spacy.cli
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+import nltk
+from nltk import tokenize
+nltk.download('punkt')
+import spacy_streamlit
+
 
 st.title('Audio-to-Text')
 
 audio_file = st.file_uploader('Upload Audio' , type=['wav' , 'mp3','m4a'])
 
+st.title( 'Please select any of the NLP tasks')
+
+
 if st.button('Trascribe Audio'):
     if audio_file is not None:
         processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
@@ -19,7 +30,9 @@ if st.button('Trascribe Audio'):
         logits = model(input_values).logits
         predicted_ids = torch.argmax(logits, dim=-1)
         text = processor.batch_decode(predicted_ids)
-
+        summary_list = [str(sentence) for sentence in text]
+        result = ' '.join(summary_list)
+        st.markdown(result)
     else:
         st.error('please upload the audio file')
 
@@ -33,8 +46,10 @@ if st.button('Summarize'):
     logits = model(input_values).logits
     predicted_ids = torch.argmax(logits, dim=-1)
     text = processor.batch_decode(predicted_ids)
+    summary_list = [str(sentence) for sentence in text]
+    result = ' '.join(summary_list)
     summarize = pipeline("summarization")
-    st.
+    st.markdown(summarize(result)[0]['summary_text'])
 
 if st.button('sentiment-analysis'):
     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
@@ -44,8 +59,10 @@ if st.button('sentiment-analysis'):
     logits = model(input_values).logits
     predicted_ids = torch.argmax(logits, dim=-1)
     text = processor.batch_decode(predicted_ids)
+    summary_list = [str(sentence) for sentence in text]
+    result = ' '.join(summary_list)
     nlp_sa = pipeline("sentiment-analysis")
-    st.
+    st.markdown(nlp_sa(result))
 
 if st.button('Name'):
     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
@@ -55,7 +72,41 @@ if st.button('Name'):
     logits = model(input_values).logits
     predicted_ids = torch.argmax(logits, dim=-1)
     text = processor.batch_decode(predicted_ids)
-
-
-
-
+    summary_list = [str(sentence) for sentence in text]
+    result = ' '.join(summary_list)
+    nlp = spacy.load('en_core_web_sm')
+    doc=nlp(result)
+    spacy_streamlit.visualize_ner(doc, labels=nlp.get_pipe("ner").labels, title= "List of Entities")
+
+
+tokenizer = AutoTokenizer.from_pretrained("t5-base")
+
+@st.cache(allow_output_mutation=True)
+def load_model():
+    model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+    return model
+
+model1 = load_model()
+
+st.subheader('Select your source and target language below.')
+source_lang = st.selectbox("Source language",['English'])
+target_lang = st.selectbox("Target language",['German','French'])
+
+
+if st.button('Translate'):
+    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+    model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
+    speech, rate = librosa.load(audio_file, sr=16000)
+    input_values = processor(speech, return_tensors="pt", padding="longest", sampling_rate=rate).input_values
+    logits = model(input_values).logits
+    predicted_ids = torch.argmax(logits, dim=-1)
+    text = processor.batch_decode(predicted_ids)
+    summary_list = [str(sentence) for sentence in text]
+    result = ' '.join(summary_list)
+    prefix = 'translate '+str(source_lang)+' to '+str(target_lang)
+    sentence_token = tokenize.sent_tokenize(result)
+    output = tokenizer([prefix+sentence for sentence in sentence_token], padding=True, return_tensors="pt")
+    translated_id = model1.generate(output["input_ids"], attention_mask=output['attention_mask'], max_length=100)
+    translated_word = tokenizer.batch_decode(translated_id, skip_special_tokens=True)
+    st.subheader('Translated Text')
+    st.write(' '.join(translated_word))
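Every button handler in this revision repeats the same load-and-transcribe sequence (Wav2Vec2Processor + HubertForCTC + librosa resampling + greedy CTC decode), so each click reloads both checkpoints. A minimal sketch of how that shared step could be factored out, reusing the st.cache pattern the commit already applies to load_model(); the helper names load_asr and transcribe are hypothetical, not part of the commit:

import librosa
import torch
import streamlit as st
from transformers import HubertForCTC, Wav2Vec2Processor

@st.cache(allow_output_mutation=True)  # same caching decorator the commit uses for the T5 model
def load_asr():
    # Load the Wav2Vec2 processor and HuBERT acoustic model once per session.
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
    return processor, model

def transcribe(audio_file):
    # Same steps as each handler in the diff: resample to 16 kHz, run CTC, argmax-decode.
    processor, model = load_asr()
    speech, rate = librosa.load(audio_file, sr=16000)
    input_values = processor(speech, return_tensors="pt", padding="longest", sampling_rate=rate).input_values
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return ' '.join(str(s) for s in processor.batch_decode(predicted_ids))

Each handler could then call transcribe(audio_file) and pass the returned string to its pipeline.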
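The import list now pulls in en_core_web_sm and spacy.cli, but neither is referenced later; the NER handler calls spacy.load('en_core_web_sm') directly, which raises OSError when the model package is not installed in the container, and that is one plausible cause of a Space showing "Runtime error". A hedged sketch of a download guard built on spacy.cli (get_nlp is a hypothetical helper; it assumes downloading the model at startup is acceptable):

import spacy
import spacy.cli

def get_nlp():
    # Hypothetical fallback: if en_core_web_sm is missing, fetch it with
    # spacy.cli.download and retry the load.
    try:
        return spacy.load('en_core_web_sm')
    except OSError:
        spacy.cli.download('en_core_web_sm')
        return spacy.load('en_core_web_sm')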
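The new 'Translate' handler builds its task prefix as 'translate ' + source_lang + ' to ' + target_lang and concatenates it directly onto each NLTK-tokenized sentence, so t5-base receives strings like 'translate English to GermanTHE WEATHER ...'; T5's conventional prefix ends with ': ' before the text. A standalone sketch of the same flow on a fixed string, using only the models and calls that appear in the diff (the sample transcript is invented):

import nltk
from nltk import tokenize
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

nltk.download('punkt')
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

result = "THE WEATHER IS NICE TODAY. WE WILL GO OUT."  # placeholder transcript, not from the app
prefix = 'translate English to German: '               # note the ': ' separator
sentences = tokenize.sent_tokenize(result)
batch = tokenizer([prefix + s for s in sentences], padding=True, return_tensors="pt")
ids = model.generate(batch["input_ids"], attention_mask=batch["attention_mask"], max_length=100)
print(' '.join(tokenizer.batch_decode(ids, skip_special_tokens=True)))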