|
import gradio as gr |
|
import torch |
|
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline |
|
|
|
|
|
|
|
# Hugging Face hub id of an extractive question-answering model
# (RoBERTa-base fine-tuned on SQuAD 2.0).
model_name = "deepset/roberta-base-squad2"

# Load the tokenizer and model once at import time.
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForQuestionAnswering.from_pretrained(model_name)




# Fixed passage that every question is answered against (used by get_answer).
# The blank lines are part of the string content.
fixed_context = """Ishaan is a 6-year-old kid. He is very good at football. He is a very good sportsperson.

He is a smart kid. He can run very fast, as fast as 10 meters in 1 minute.

He goes to Vidyani Ketan School. He goes to school from 8 am to 3:30 pm.

Ishaan has many friends. Vineet is Ishaan's brother."""
|
|
|
|
|
# Pipeline is built lazily on first use and then reused; the original code
# rebuilt it (re-loading model weights) on every single question.
_qa_pipeline = None


def get_answer(question):
    """Answer *question* by extractive QA over the module-level fixed_context.

    Parameters
    ----------
    question : str
        Natural-language question about the fixed passage.

    Returns
    -------
    str
        The answer span extracted from fixed_context by the QA model.
    """
    global _qa_pipeline
    if _qa_pipeline is None:
        # Reuse the tokenizer/model objects already loaded at module level
        # instead of passing the hub name, which would re-download/re-load
        # the weights a second time.
        _qa_pipeline = pipeline(
            'question-answering', model=model, tokenizer=tokenizer
        )

    QA_input = {
        'question': question,
        'context': fixed_context
    }
    res = _qa_pipeline(QA_input)
    return res['answer']
|
|
|
|
|
# Wire the QA function into a minimal web UI: one text box in, one text box out.
question_input = gr.Textbox(label="Question")
answer_output = gr.Textbox(label="Answer")

gradio_ui = gr.Interface(
    fn=get_answer,
    inputs=question_input,
    outputs=answer_output,
)

# Start the local Gradio server (blocks until the app is stopped).
gradio_ui.launch()
|
|