import gc

import streamlit as st
import torch
import bitsandbytes  # needed for 8-bit (quantized) model loading
import accelerate  # needed for device_map="auto" model placement
import scipy  # runtime dependency of bitsandbytes
from PIL import Image
from transformers import (
    Blip2Processor,
    Blip2ForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipForConditionalGeneration,
)

# NOTE: the placeholder functions defined below intentionally shadow these
# three imports from my_model until the real implementations are wired in.
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources

def load_caption_model():
    """Placeholder: will return a (model, processor) pair once implemented."""
    st.write("Placeholder for load_caption_model function")
    return None, None

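# A minimal sketch of what load_caption_model could look like, using the BLIP-2
# classes imported above. The checkpoint name and 8-bit settings are assumptions,
# not this project's confirmed configuration; the placeholder above stays active.
def _example_load_blip2(checkpoint="Salesforce/blip2-opt-2.7b"):
    processor = Blip2Processor.from_pretrained(checkpoint)
    # load_in_8bit relies on bitsandbytes; device_map="auto" relies on accelerate.
    model = Blip2ForConditionalGeneration.from_pretrained(
        checkpoint, load_in_8bit=True, device_map="auto"
    )
    return model, processor
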
def answer_question(image, question, model, processor):
    """Placeholder: will run VQA inference once the model is wired in."""
    return "Placeholder answer for the question"

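# A sketch of how answer_question could drive the BLIP-2 / InstructBLIP
# generate API; the generation settings below are assumptions, not confirmed
# project configuration.
def _example_answer_question(image, question, model, processor):
    inputs = processor(images=image, text=question, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=50)
    return processor.decode(output_ids[0], skip_special_tokens=True).strip()
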
def detect_and_draw_objects(image, model_name, threshold):
    """Placeholder: shadows the my_model import until detection is wired in."""
    return image, "Detected objects"

def get_caption(image):
    """Placeholder: shadows the my_model import until captioning is wired in."""
    return "Generated caption for the image"

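# A sketch of prompt-free captioning with the same processor/model pair: calling
# the processor with an image only (no text) makes generate produce a caption.
# The signature differs from the get_caption placeholder above and is hypothetical.
def _example_get_caption(image, model, processor):
    inputs = processor(images=image, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=30)
    return processor.decode(output_ids[0], skip_special_tokens=True).strip()
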
def free_gpu_resources():
    """Release cached GPU memory between model runs."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio(
        "Go to",
        ["Home", "Dataset Analysis", "Evaluation Results", "Run Inference",
         "Dissertation Report", "Object Detection"],
    )

    if selection == "Home":
        display_home()
    elif selection == "Dissertation Report":
        display_dissertation_report()
    elif selection == "Evaluation Results":
        display_evaluation_results()
    elif selection == "Dataset Analysis":
        display_dataset_analysis()
    elif selection == "Run Inference":
        run_inference()
    elif selection == "Object Detection":
        run_object_detection()

def display_home():
    st.title("MultiModal Learning for Knowledge-Based Visual Question Answering")
    st.write("Home page content goes here...")


def display_dissertation_report():
    st.title("Dissertation Report")
    st.write("Click the button below to download the PDF.")
    # Read the PDF inside a context manager so the file handle is closed promptly.
    with open("Files/Dissertation Report.pdf", "rb") as pdf_file:
        pdf_bytes = pdf_file.read()
    st.download_button(
        label="Download PDF",
        data=pdf_bytes,
        file_name="Dissertation Report.pdf",
        mime="application/pdf",
    )

def display_evaluation_results():
    st.title("Evaluation Results")
    st.write("This is a placeholder until the contents are uploaded.")

def display_dataset_analysis():
    st.title("OK-VQA Dataset Analysis")
    st.write("This is a placeholder until the contents are uploaded.")

def run_inference():
    st.title("Image-based Q&A App")
    image_qa_app()

def run_object_detection():
    st.title("Object Detection")
    # TODO: wire this page to the detect_and_draw_objects pipeline.

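# A minimal sketch of how this page might be wired, using the
# detect_and_draw_objects(image, model_name, threshold) signature stubbed above.
# The uploader label, slider range, and model name "detr" are assumptions.
def _example_object_detection_page():
    uploaded = st.file_uploader("Upload an Image for Detection", type=["png", "jpg", "jpeg"])
    threshold = st.slider("Confidence threshold", 0.0, 1.0, 0.5)
    if uploaded is not None:
        image = Image.open(uploaded)
        annotated, detections = detect_and_draw_objects(image, "detr", threshold)
        st.image(annotated, caption="Detected Objects", use_column_width=True)
        st.write(detections)
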
def image_qa_app():
    # One history entry per uploaded image, persisted across Streamlit reruns.
    if 'images_qa_history' not in st.session_state:
        st.session_state['images_qa_history'] = []

    if st.button('Clear All'):
        st.session_state['images_qa_history'] = []
        st.experimental_rerun()  # st.rerun() on Streamlit >= 1.27

    uploaded_image = st.file_uploader("Upload an Image", type=["png", "jpg", "jpeg"])

    # Track which image is currently uploaded; None when nothing is selected,
    # so the rendering loop below never hits an unbound variable.
    current_image_key = None
    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        current_image_key = uploaded_image.name

        # Register the image once, keyed by file name, so reruns do not duplicate it.
        if not any(info['image_key'] == current_image_key
                   for info in st.session_state['images_qa_history']):
            st.session_state['images_qa_history'].append({
                'image_key': current_image_key,
                'image': image,
                'qa_history': []
            })

    # Render every stored image with its accumulated Q&A history.
    for image_info in st.session_state['images_qa_history']:
        st.image(image_info['image'], caption='Uploaded Image.', use_column_width=True)
        for q, a in image_info['qa_history']:
            st.text(f"Q: {q}\nA: {a}\n")

        # Only the currently uploaded image gets an input box for new questions.
        if image_info['image_key'] == current_image_key:
            question_key = f"question_{current_image_key}"
            button_key = f"button_{current_image_key}"
            question = st.text_input("Ask a question about this image:", key=question_key)

            if st.button('Get Answer', key=button_key):
                answer = get_answer(image_info['image'], question)
                image_info['qa_history'].append((question, answer))
                st.experimental_rerun()

def get_answer(image, question):
    """Placeholder: will delegate to the loaded VQA model once wired in."""
    return "Sample answer based on the image and question."

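# Models should be loaded once and reused across reruns; st.cache_resource
# (Streamlit >= 1.18) memoizes the loader. This sketch assumes the hypothetical
# _example_load_blip2 defined earlier.
@st.cache_resource
def _example_cached_model():
    return _example_load_blip2()
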
if __name__ == "__main__":
    main()