Janoah committed
Commit b9ceaf9 · 1 Parent(s): fbb2641

Final commit

Files changed (2):
  1. app.py +99 -18
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,13 +1,9 @@
 import numpy as np
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
-def flip_text(x):
-    return x[::-1]
-
-def flip_image(x):
-    return np.fliplr(x)

+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline, T5Tokenizer, T5ForConditionalGeneration
+
+# 1. GENERATE SUMMARY
 tokenizer = AutoTokenizer.from_pretrained("suriya7/bart-finetuned-text-summarization")
 model = AutoModelForSeq2SeqLM.from_pretrained("suriya7/bart-finetuned-text-summarization")

@@ -17,23 +13,108 @@ def generate_summary(text):
     summary_ids = model.generate(inputs['input_ids'], max_new_tokens=100, do_sample=False)
     summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
     return summary
+
+
+
+# 2. TRANSLATE FUNCTION
+t5_tokenizer = T5Tokenizer.from_pretrained('t5-small')
+t5_model = T5ForConditionalGeneration.from_pretrained('t5-small')
+
+def translate_text(text_to_translate, original_language, destination_language):
+    input_text = "translate " + original_language + " to " + destination_language + ": " + text_to_translate
+
+    input_ids = t5_tokenizer.encode(input_text, return_tensors='pt')
+
+    outputs = t5_model.generate(input_ids)
+    output_text = t5_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    return output_text
+
+
+
+# 4. QUESTION ANSWERING FUNCTION
+def question_answering(question, context):
+    qa_model = pipeline("question-answering", "timpal0l/mdeberta-v3-base-squad2")
+    question = question
+    context = context
+    solution = qa_model(question=question, context=context)
+    return solution['answer']
+
+
+
+# 5. PARAPHRASING FUNCTION
+paraphrasing_tokenizer = AutoTokenizer.from_pretrained("vngrs-ai/VBART-Large-Paraphrasing", model_input_names=['input_ids', 'attention_mask'])
+paraphrasing_model = AutoModelForSeq2SeqLM.from_pretrained("vngrs-ai/VBART-Large-Paraphrasing")
+
+def paraphrasing(text):
+    input_text = text
+
+    token_input = paraphrasing_tokenizer(input_text, return_tensors="pt")  # .to('cuda')
+    outputs = paraphrasing_model.generate(**token_input)
+    return paraphrasing_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+
+
+

-with gr.Blocks() as main:
+with gr.Blocks() as demo:
     gr.Markdown("My AI interface")
     with gr.Tab("Single models"):
-        text_to_summarize = gr.Textbox(label="Text to summarize")
-        summary_output = gr.Textbox(label="Summary")
-        summarize_btn = gr.Button("Summarize")
+        # 1. GENERATE SUMMARY
+        with gr.Accordion("Text summarization"):
+            gr.Markdown("Single model summarization using the BART model")
+            text_to_summarize = gr.Textbox(label="Text to summarize")
+            summary_output = gr.Textbox(label="Summary")
+            summarize_btn = gr.Button("Summarize")
+
+
+        # 2. TRANSLATE FUNCTION
+        with gr.Accordion("Text translation", open=False):
+            gr.Markdown("Single model translation using the Google T5 Small model")
+            text_to_translate = gr.Textbox(label="Text to translate")
+            original_language = gr.Textbox(label="Original language (write the full name, e.g. English)")
+            destination_language = gr.Textbox(label="Destination language (write the full name, e.g. German)")
+            translate_output = gr.Textbox(label="Translation")
+            translate_btn = gr.Button("Translate")
+
+
+        # 3. SENTENCE FILL MASK (no handler yet)
+        with gr.Accordion("Sentence fill mask", open=False):
+            gr.Markdown("Single model sentence fill mask (model not connected yet)")
+            sentence_to_fill = gr.Textbox(label="Sentence to fill")
+            filled_sentence = gr.Textbox(label="Filled sentence")
+            fill_button = gr.Button("Fill sentence")
+
+
+        # 4. QUESTION ANSWERING
+        with gr.Accordion("Question answering", open=False):
+            gr.Markdown("Single model question answering using the mDeBERTa v3 model")
+            question = gr.Textbox(label="Question")
+            context = gr.Textbox(label="Context for question")
+            answer = gr.Textbox(label="Answer to question")
+            ask_question_button = gr.Button("Ask question")
+
+
+        # 5. PARAPHRASING
+        with gr.Accordion("Paraphrasing", open=False):
+            gr.Markdown("Single model paraphrasing using the VBART model")
+            sentence_to_rephrase = gr.Textbox(label="Text to rephrase")
+            rephrased_sentence = gr.Textbox(label="Rephrased sentence")
+            paraphrase_button = gr.Button("Rephrase sentence")


     with gr.Tab("Multi models"):
         with gr.Row():
-            image_input = gr.Image()
-            image_output = gr.Image()
-            image_button = gr.Button("Flip")
+            print("No multi models yet..")

-    # text_button.click(flip_text, inputs=text_input, outputs=text_output)
-    image_button.click(flip_image, inputs=image_input, outputs=image_output)
-    summarize_btn.click(generate_summary, inputs=text_to_summarize, outputs=summary_output)
+
+    # Button listeners
+    summarize_btn.click(generate_summary, inputs=text_to_summarize, outputs=summary_output)  # 1. GENERATE SUMMARY
+    translate_btn.click(translate_text, inputs=[text_to_translate, original_language, destination_language], outputs=translate_output)  # 2. TRANSLATE FUNCTION
+
+    ask_question_button.click(question_answering, inputs=[question, context], outputs=answer)  # 4. QUESTION ANSWERING
+    paraphrase_button.click(paraphrasing, inputs=sentence_to_rephrase, outputs=rephrased_sentence)  # 5. PARAPHRASING
+
+

-main.launch()
+demo.launch()
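
Note on the "Sentence fill mask" accordion: this commit adds its textboxes and button but no handler, so the "Fill sentence" button does nothing yet. A minimal sketch of how it could be wired up, assuming a standard fill-mask pipeline; the checkpoint (bert-base-uncased) and the fill_sentence helper are illustrative choices, not part of this commit:

# Hypothetical follow-up sketch, not part of this commit.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")  # any fill-mask checkpoint would do

def fill_sentence(text):
    # The input must contain the tokenizer's mask token (here "[MASK]");
    # the pipeline returns candidate completions ranked by score, so keep the best one.
    return fill_mask(text)[0]["sequence"]

# Inside the gr.Blocks() context, next to the other button listeners:
# fill_button.click(fill_sentence, inputs=sentence_to_fill, outputs=filled_sentence)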
requirements.txt CHANGED
@@ -1,2 +1,3 @@
 torchvision==0.17.2
-transformers==4.40.0
+transformers==4.40.0
+sentencepiece==0.2.0
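
The sentencepiece pin matters because the slow T5Tokenizer used in app.py is a SentencePiece-based tokenizer, so loading 't5-small' fails when the sentencepiece package is missing. A quick sanity check, assuming the pinned packages are installed:

# Minimal check that the T5 tokenizer loads (errors out without sentencepiece installed).
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
print(tok.tokenize("translate English to German: hello world"))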