# KB-VQA-E / app.py
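"""Streamlit app for the KB-VQA-E demo.

Provides a sidebar with Home, Dataset Analysis, Evaluation Results, Run Inference,
Dissertation Report, and Object Detection pages; the Run Inference page loads a KBVQA
model and answers questions about a sample or uploaded image.
"""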

import copy  # needed for copy.deepcopy below

import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
from PIL import Image
import torch.nn as nn

from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources
from my_model.KBVQA import KBVQA, prepare_kbvqa_model


def answer_question(image, question, model):
    """Return the model's answer to a question about the given image."""
    answer = model.generate_answer(question, image)
    return answer


# Placeholder stubs; note that these shadow the get_caption and free_gpu_resources imported above.
def get_caption(image):
    return "Generated caption for the image"


def free_gpu_resources():
    pass


# Paths to the bundled sample images
sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",
                 "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg",
                 "Files/sample7.jpg"]


def run_inference():
    st.title("Run Inference")

    # Initialize session state for the model
    if 'kbvqa' not in st.session_state:
        st.session_state['kbvqa'] = None

    # Button to load KBVQA models
    if st.button('Load KBVQA Model'):
        if st.session_state['kbvqa'] is not None:
            st.write("Model already loaded.")
        else:
            # Call the function to load models and show progress
            st.session_state['kbvqa'] = prepare_kbvqa_model('yolov5')  # Replace with your model
            if st.session_state['kbvqa']:
                st.write("Model is ready for inference.")
            else:
                st.write("Please load the model first")

    if st.session_state['kbvqa']:
        image_qa_app(st.session_state['kbvqa'])


def image_qa_app(kbvqa):
    # Initialize session state for storing the current image and its Q&A history
    if 'current_image' not in st.session_state:
        st.session_state['current_image'] = None
    if 'qa_history' not in st.session_state:
        st.session_state['qa_history'] = []

    # Display sample images as clickable thumbnails
    st.write("Choose from sample images:")
    cols = st.columns(len(sample_images))
    for idx, sample_image_path in enumerate(sample_images):
        with cols[idx]:
            image = Image.open(sample_image_path)
            # Use a button for each sample image
            if st.button(f'Sample Image {idx + 1}', key=f'sample_{idx}'):
                st.session_state['current_image'] = image
                st.session_state['processed_image'] = copy.deepcopy(image)  # Copy for processing
                st.session_state['qa_history'] = []

    # Image uploader
    uploaded_image = st.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        st.session_state['current_image'] = image
        st.session_state['processed_image'] = copy.deepcopy(image)  # Create a copy for processing
        st.session_state['qa_history'] = []

    # Display the current image (unaltered)
    if st.session_state.get('current_image') is not None:
        st.image(st.session_state['current_image'], caption='Current Image', use_column_width=True)

    # Question input and processing
    question = st.text_input("Ask a question about this image:")
    if st.button('Get Answer'):
        # Use the processed image for object detection and other processing
        processed_image = st.session_state.get('processed_image')
        if processed_image is not None:
            # Perform object detection or other processing on processed_image
            # For example: processed_image = perform_object_detection(processed_image)
            answer = answer_question(processed_image, question, model=kbvqa)
            st.session_state['qa_history'].append((question, answer))

            # Display all Q&A
            for q, a in st.session_state['qa_history']:
                st.text(f"Q: {q}\nA: {a}\n")

            # Update the processed image in session state
            st.session_state['processed_image'] = processed_image
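

# The "Object Detection" page below calls run_object_detection(), which is not defined in this
# file. The function below is a minimal, hypothetical sketch of what it might look like, assuming
# detect_and_draw_objects(image, model_name) from my_model.object_detection accepts a PIL image
# plus a model name and returns an annotated image; adjust it to the helper's real signature.
def run_object_detection():
    st.title("Object Detection")
    uploaded_image = st.file_uploader("Upload an Image for detection", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        st.image(image, caption='Uploaded Image', use_column_width=True)
        # Hypothetical call; the actual helper may take different arguments.
        detected_image = detect_and_draw_objects(image, 'yolov5')
        st.image(detected_image, caption='Detected Objects', use_column_width=True)
        free_gpu_resources()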


# Main function
def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Evaluation Results",
                                           "Run Inference", "Dissertation Report", "Object Detection"])

    if selection == "Home":
        st.title("Multimodal Learning for Knowledge-Based Visual Question Answering")
        st.write("Home page content goes here...")

    elif selection == "Dissertation Report":
        st.title("Dissertation Report")
        st.write("Click the button below to download the PDF.")
        # Offer the dissertation report PDF as a download
        with open("Files/Dissertation Report.pdf", "rb") as pdf_file:
            st.download_button(
                label="Download PDF",
                data=pdf_file.read(),
                file_name="Dissertation Report.pdf",
                mime="application/pdf"
            )

    elif selection == "Evaluation Results":
        st.title("Evaluation Results")
        st.write("This is a placeholder until the contents are uploaded.")

    elif selection == "Dataset Analysis":
        st.title("OK-VQA Dataset Analysis")
        st.write("This is a placeholder until the contents are uploaded.")

    elif selection == "Run Inference":
        run_inference()

    elif selection == "Object Detection":
        run_object_detection()


if __name__ == "__main__":
    main()