Hugging Face Space: Gradio demo serving a fine-tuned GPT-2 model for case-specific question answering.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

# Hugging Face Hub repo of the fine-tuned GPT-2 checkpoint used for answering.
MODEL_PATH = 'kahennefer/fine_tuned_gpt2'

# Load the causal-LM weights and matching tokenizer from the Hub
# (downloads on first run, then served from the local cache).
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# Reusable text-generation pipeline built once at import time so each
# request only pays for inference, not model loading.
text_gen_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Keep the original lowercase name available for any external references.
model_path = MODEL_PATH
def generate_answer(question: str) -> str:
    """Generate a model answer for *question* using the GPT-2 pipeline.

    Args:
        question: Free-form user question (used as the generation prompt).

    Returns:
        The generated text for the single returned sequence. Note that the
        pipeline's output includes the prompt itself; max_length=100 caps
        prompt + generated tokens combined.
    """
    result = text_gen_pipeline(question, max_length=100, num_return_sequences=1)
    return result[0]['generated_text']
# Wire the generation function into a simple single-textbox Gradio UI
# and start the app server.
iface = gr.Interface(
    fn=generate_answer,
    inputs=gr.Textbox(lines=2, placeholder="Ask a question about the case..."),
    outputs=gr.Text(label="Answer"),
    title="Case-Specific Question Answering System",
    description="Ask any question about the case, and the model will provide an answer based on its knowledge.",
)
iface.launch()