import streamlit as st
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration


# Cache the model and processor so they are only loaded once per session.
@st.cache_resource
def load_caption_model():
    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    # 8-bit loading requires the accelerate and bitsandbytes packages.
    model = Blip2ForConditionalGeneration.from_pretrained(
        "Salesforce/blip2-opt-2.7b",
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return model, processor


def answer_question(image, question, model, processor):
    # Load the uploaded file and convert it to RGB for the processor.
    image = Image.open(image).convert('RGB')
    # Preprocess the image-question pair; this assumes a CUDA GPU is available.
    inputs = processor(image, question, return_tensors="pt").to("cuda", torch.float16)
    out = model.generate(**inputs, max_length=200, min_length=20, num_beams=1)
    answer = processor.decode(out[0], skip_special_tokens=True).strip()
    return answer


st.title("Image Question Answering")

image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
question = st.text_input("Enter your question about the image:")

if st.button("Get Answer"):
    if image is not None and question:
        # Show the uploaded image, then run the model on the question.
        st.image(image, use_column_width=True)
        model, processor = load_caption_model()
        answer = answer_question(image, question, model, processor)
        st.write(answer)
    else:
        st.write("Please upload an image and enter a question.")