# paligemma-docci/app.py
# Installing a development build of the transformers library from a bundled wheel
import os
os.system("pip install ./transformers-4.47.0.dev0-py3-none-any.whl")
# Importing the required libraries
import warnings
warnings.filterwarnings("ignore")
import gradio as gr
from src.app.response import describe_image
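# Based on the Interface wiring below, describe_image is expected to take a PIL
# image, a question string, and a max_new_tokens integer, and return the model's
# answer as a string.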
# Image, text query, and input parameters
image = gr.Image(type="pil", label="Image")
text = gr.Textbox(label="Question", placeholder="Enter your question here")
max_new_tokens = gr.Slider(
    minimum=20, maximum=160, step=10, value=80, label="Max Tokens"
)
# Output for the interface
answer = gr.Textbox(label="Predicted answer", show_label=True, show_copy_button=True)
# Examples for the interface
examples = [
    [
        "images/cat.jpg",
        "How many cats are there?",
        80,
    ],
    [
        "images/dog.jpg",
        "What color is the dog?",
        80,
    ],
    [
        "images/bird.jpg",
        "What is the bird doing?",
        160,
    ],
]
# Title, description, and article for the interface
title = "Visual Question Answering"
description = "Gradio Demo for the PaliGemma 2 vision-language understanding and generation model. The model answers questions about images in natural language. To use it, upload an image, type a question, adjust the parameters or keep the default values, and click 'Submit'; alternatively, click one of the examples to load it. You can read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2412.03555' target='_blank'>Model Paper</a> | <a href='https://huggingface.co/google/paligemma2-3b-ft-docci-448' target='_blank'>Model Page</a></p>"
# Create the Gradio interface
interface = gr.Interface(
    fn=describe_image,
    inputs=[image, text, max_new_tokens],
    outputs=answer,
    examples=examples,
    cache_examples=True,
    cache_mode="lazy",
    title=title,
    description=description,
    article=article,
    theme="Nymbo/Nymbo_Theme",
    flagging_mode="never",
)
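# Launch the interface; with cache_mode="lazy", example outputs are cached the
# first time an example is requested rather than at startup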
interface.launch(debug=False)
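# For reference, a minimal sketch of what src/app/response.py presumably implements,
# assuming the standard transformers PaliGemma 2 API and the
# google/paligemma2-3b-ft-docci-448 checkpoint linked in the article above. It is
# kept commented out here so the app only uses the real implementation.
#
# import torch
# from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
#
# model_id = "google/paligemma2-3b-ft-docci-448"
# processor = AutoProcessor.from_pretrained(model_id)
# model = PaliGemmaForConditionalGeneration.from_pretrained(
#     model_id, torch_dtype=torch.bfloat16, device_map="auto"
# ).eval()
#
# def describe_image(image, question, max_new_tokens):
#     # PaliGemma prompts start with the <image> placeholder token
#     prompt = f"<image>{question}"
#     inputs = (
#         processor(text=prompt, images=image, return_tensors="pt")
#         .to(torch.bfloat16)
#         .to(model.device)
#     )
#     input_len = inputs["input_ids"].shape[-1]
#     with torch.inference_mode():
#         output = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
#     # Decode only the newly generated tokens
#     return processor.decode(output[0][input_len:], skip_special_tokens=True)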