import gradio as gr
import torch
from transformers import PaliGemmaProcessor, PaliGemmaForConditionalGeneration
from PIL import Image
# Load model and processor
model_id = "google/paligemma2-28b-mix-448"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto").eval()
processor = PaliGemmaProcessor.from_pretrained(model_id)
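
# Note: the 28B checkpoint is tens of GB of bfloat16 weights, so device_map="auto"
# relies on the `accelerate` package to place the shards across the available
# GPU(s) and CPU. A lower-memory alternative (a sketch, assuming the
# `bitsandbytes` package is installed; not part of the original app) could be:
#
#     from transformers import BitsAndBytesConfig
#     model = PaliGemmaForConditionalGeneration.from_pretrained(
#         model_id,
#         quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#         device_map="auto",
#     ).eval()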
def generate_description(image, prompt):
    if image is None:
        return "Please upload an image."
    # Tokenize the prompt together with the image and match the model's dtype/device
    model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(torch.bfloat16).to(model.device)
    input_len = model_inputs["input_ids"].shape[-1]
    with torch.inference_mode():
        generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
        # Drop the prompt tokens so only the newly generated continuation is decoded
        generation = generation[0][input_len:]
        decoded = processor.decode(generation, skip_special_tokens=True)
    return decoded
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# PaliGemma Image Captioning")
    image_input = gr.Image(type="pil", label="Upload Image")
    prompt_input = gr.Textbox(label="Enter Prompt", value="describe en")
    output_text = gr.Textbox(label="Generated Description")
    submit_button = gr.Button("Generate")
    submit_button.click(generate_description, inputs=[image_input, prompt_input], outputs=output_text)

demo.launch()
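
# A hosted demo may also benefit from Gradio's built-in request queue, since each
# generate() call can take several seconds; a minimal variant (standard Gradio
# API, not part of the original app) would be:
#
#     demo.queue().launch()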