import streamlit as st
import torch
import torch.nn as nn
import bitsandbytes
import accelerate
import scipy
from PIL import Image

from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources


def load_caption_model():
    # Placeholder stub; the real loader should return a (model, processor) pair.
    st.write("Placeholder for load_caption_model function")
    return None, None
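

# A minimal sketch of what load_caption_model might eventually do, assuming the
# captioner is a BLIP checkpoint from Hugging Face Transformers. The checkpoint
# id and device placement below are illustrative assumptions, not this
# project's actual configuration.
def load_caption_model_blip_sketch():
    from transformers import BlipProcessor, BlipForConditionalGeneration  # assumed dependency
    device = "cuda" if torch.cuda.is_available() else "cpu"
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained(
        "Salesforce/blip-image-captioning-base"
    ).to(device)
    return model, processor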


def answer_question(image, question, model, processor):
    # Placeholder stub; see the hedged VQA sketch below for one possible implementation.
    return "Placeholder answer for the question"


def get_caption(image):
    # Placeholder stub; note it shadows the get_caption imported from
    # my_model.captioner.image_captioning above.
    return "Generated caption for the image"


def free_gpu_resources():
    # Placeholder stub; shadows the import from my_model.utilities above.
    pass
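

# A common pattern for reclaiming GPU memory between inference calls: run the
# garbage collector to drop dangling references, then release PyTorch's cached
# CUDA blocks. A sketch of what free_gpu_resources could do.
def free_gpu_resources_sketch():
    import gc
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()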


sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",
                 "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg",
                 "Files/sample7.jpg"]


def run_inference():
    st.title("Run Inference")
    image_qa_and_object_detection()


def image_qa_and_object_detection():
    st.subheader("Talk to your image")
    image_qa_app()

    st.subheader("Object Detection")
    object_detection_app()


def image_qa_app():
    if 'images_qa_history' not in st.session_state:
        st.session_state['images_qa_history'] = []

    if st.button('Clear All'):
        st.session_state['images_qa_history'] = []
        st.experimental_rerun()

    uploaded_file = st.file_uploader("Upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        # PIL leaves .filename empty for file objects, so carry the name over
        # for use as a session-state key.
        image.filename = uploaded_file.name
        process_uploaded_image(image)

    st.write("Or choose from sample images:")
    for idx, sample_image_path in enumerate(sample_images):
        if st.button(f"Use Sample Image {idx+1}", key=f"sample_{idx}"):
            # Open the sample path directly; re-passing the resulting PIL image
            # through Image.open (as the original flow did) raises an error.
            process_uploaded_image(Image.open(sample_image_path))


def process_uploaded_image(image):
    # PIL only sets a filename for images opened from a path; fall back safely.
    current_image_key = getattr(image, 'filename', '') or 'uploaded_image'

    if not any(info['image_key'] == current_image_key for info in st.session_state['images_qa_history']):
        st.session_state['images_qa_history'].append({
            'image_key': current_image_key,
            'image': image,
            'qa_history': []
        })

    for image_info in st.session_state['images_qa_history']:
        st.image(image_info['image'], caption='Uploaded Image.', use_column_width=True)
        for q, a in image_info['qa_history']:
            st.text(f"Q: {q}\nA: {a}\n")

        if image_info['image_key'] == current_image_key:
            question_key = f"question_{current_image_key}"
            button_key = f"button_{current_image_key}"

            question = st.text_input("Ask a question about this image:", key=question_key)
            if st.button('Get Answer', key=button_key):
                answer = answer_question(image_info['image'], question, None, None)
                image_info['qa_history'].append((question, answer))
                st.experimental_rerun()


def object_detection_app():
    # Placeholder stub; see the hedged sketch below for one possible UI.
    pass
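

# A hedged sketch of the object-detection tab: upload an image and hand it to
# detect_and_draw_objects. The (image) -> annotated-image call signature is an
# assumption; the real helper in my_model.object_detection may differ.
def object_detection_app_sketch():
    uploaded_file = st.file_uploader("Upload an Image for Detection",
                                     type=["png", "jpg", "jpeg"],
                                     key="detect_uploader")
    if uploaded_file is not None:
        image = Image.open(uploaded_file).convert("RGB")
        annotated = detect_and_draw_objects(image)  # assumed signature
        st.image(annotated, caption="Detected Objects", use_column_width=True)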


if __name__ == "__main__":
    run_inference()