import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load your pretrained model and tokenizer.
# Replace with your model's repo id; this interface expects a seq2seq text model,
# not an image classifier such as the ViT/LoRA checkpoint named here.
model_name = "JPeace18/vit-base-patch16-224-in21k-finetuned-lora-food101"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Function to generate an answer using your model.
def generate_answer(question):
    inputs = tokenizer([question], return_tensors="pt")
    outputs = model.generate(**inputs)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

# Define the Gradio interface (generate_answer must be defined before it is referenced here).
iface = gr.Interface(
    fn=generate_answer,
    inputs=[gr.Textbox(lines=5, placeholder="Ask a question")],
    outputs="textbox",
    title="AI Answer Generator",
)

# Launch the interface.
iface.launch()


# --- Upgrading transformers from a notebook ---
# Run pip in the current Python environment and show its output in the notebook.
import subprocess
import sys

from IPython.display import HTML, display

commands = [
    [sys.executable, "-m", "pip", "install", "--upgrade", "transformers"],
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "transformers"],
]
for cmd in commands:
    result = subprocess.run(cmd, capture_output=True, text=True)
    display(HTML("<pre>{}</pre>".format(result.stdout)))

# ViT has no tokenizer class; its preprocessing class is ViTImageProcessor.
from transformers import ViTImageProcessor
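
The checkpoint named above, JPeace18/vit-base-patch16-224-in21k-finetuned-lora-food101, looks like a LoRA adapter for ViT image classification on Food-101 rather than a seq2seq text model, so it will not load with AutoModelForSeq2SeqLM. Here is a minimal sketch of how such a checkpoint could be served instead, assuming it is a PEFT/LoRA adapter trained on top of the base model recorded in its adapter config with a 101-class head; the PEFT loading code, the 101-label count, and the generic LABEL_i class names are assumptions, not something stated in the original post.

import gradio as gr
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification
from peft import PeftConfig, PeftModel

adapter_id = "JPeace18/vit-base-patch16-224-in21k-finetuned-lora-food101"
peft_config = PeftConfig.from_pretrained(adapter_id)

# Load the base ViT model the adapter was trained on, then attach the LoRA adapter.
processor = AutoImageProcessor.from_pretrained(peft_config.base_model_name_or_path)
base_model = AutoModelForImageClassification.from_pretrained(
    peft_config.base_model_name_or_path,
    num_labels=101,                # assumption: Food-101 has 101 classes
    ignore_mismatched_sizes=True,  # the pretrained head is replaced by a new 101-class head
)
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

def classify_food(image):
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # Without the original label mapping this returns generic LABEL_i names.
    return base_model.config.id2label[int(logits.argmax(-1))]

demo = gr.Interface(
    fn=classify_food,
    inputs=gr.Image(type="pil"),
    outputs="label",
    title="Food-101 Classifier",
)
demo.launch()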
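
Once either interface is running, it can be smoke-tested programmatically. A short sketch using gradio_client, assuming the app is on Gradio's default local URL and exposes the default /predict endpoint of a single-function Interface:

from gradio_client import Client

# Assumes the Q&A app from the first snippet is running locally on the default port.
client = Client("http://127.0.0.1:7860/")
result = client.predict("What is the capital of France?", api_name="/predict")
print(result)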