import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
from PIL import Image
import torch.nn as nn
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources
from my_model.KBVQA import KBVQA, prepare_kbvqa_model


def answer_question(image, question, model):
    answer = model.generate_answer(question, image)
    return answer


# Local placeholder stubs; these intentionally shadow the versions imported from
# my_model above until the full captioning and GPU-cleanup utilities are wired in.
def get_caption(image):
    return "Generated caption for the image"


def free_gpu_resources():
    pass


# Sample images (paths to the bundled sample images)
sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",
                 "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg",
                 "Files/sample7.jpg"]


def run_inference():
    st.title("Run Inference")

    # Initialize session state for the model
    if 'kbvqa' not in st.session_state:
        st.session_state['kbvqa'] = None

    # Button to load the KBVQA models
    if st.button('Load KBVQA Models'):
        if st.session_state['kbvqa'] is not None:
            st.write("Model already loaded.")
        else:
            # Load the models and show progress
            st.session_state['kbvqa'] = prepare_kbvqa_model('yolov5')  # replace with your detection model
            if st.session_state['kbvqa']:
                st.write("Model is ready for inference.")
            else:
                st.write("Please load the model first.")

    if st.session_state['kbvqa']:
        image_qa_app(st.session_state['kbvqa'])


def image_qa_app(kbvqa):
    # Initialize session state for the current image and its Q&A history
    if 'current_image' not in st.session_state:
        st.session_state['current_image'] = None
    if 'qa_history' not in st.session_state:
        st.session_state['qa_history'] = []

    # Display sample images as selectable thumbnails.
    # st.image is not clickable, so each thumbnail gets its own "Select" button.
    st.write("Choose from sample images:")
    cols = st.columns(len(sample_images))
    for idx, sample_image_path in enumerate(sample_images):
        with cols[idx]:
            image = Image.open(sample_image_path)
            st.image(image, use_column_width=True)
            if st.button('Select', key=f'select_sample_{idx}'):
                st.session_state['current_image'] = image
                st.session_state['qa_history'] = []

    # Image uploader
    uploaded_image = st.file_uploader("Or upload an image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        st.session_state['current_image'] = Image.open(uploaded_image)
        st.session_state['qa_history'] = []

    # Display the current image and its question-answering interface
    if st.session_state['current_image'] is not None:
        st.image(st.session_state['current_image'], caption='Selected Image', use_column_width=True)

        # Question input
        question = st.text_input("Ask a question about this image:")

        # Get Answer button
        if st.button('Get Answer'):
            # Process the question against the current image
            answer = answer_question(st.session_state['current_image'], question, model=kbvqa)
            free_gpu_resources()
            st.session_state['qa_history'].append((question, answer))

        # Display all Q&A pairs for the current image
        for q, a in st.session_state['qa_history']:
            st.text(f"Q: {q}\nA: {a}\n")


def run_object_detection():
    # Placeholder page; the object-detection demo (detect_and_draw_objects) is not wired up yet.
    st.title("Object Detection")
    st.write("This is a placeholder until the contents are uploaded.")


# Main function
def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio(
        "Go to",
        ["Home", "Dataset Analysis", "Evaluation Results", "Run Inference",
         "Dissertation Report", "Object Detection"]
    )

    if selection == "Home":
        st.title("Multimodal Learning for Knowledge-Based Visual Question Answering")
        st.write("Home page content goes here...")
    elif selection == "Dissertation Report":
        st.title("Dissertation Report")
        st.write("Click the button below to download the PDF.")
        with open("Files/Dissertation Report.pdf", "rb") as pdf_file:
            st.download_button(
                label="Download PDF",
                data=pdf_file.read(),
                file_name="Dissertation Report.pdf",
                mime="application/pdf"
            )
    elif selection == "Evaluation Results":
        st.title("Evaluation Results")
        st.write("This is a placeholder until the contents are uploaded.")
    elif selection == "Dataset Analysis":
        st.title("OK-VQA Dataset Analysis")
        st.write("This is a placeholder until the contents are uploaded.")
    elif selection == "Run Inference":
        run_inference()
    elif selection == "Object Detection":
        run_object_detection()


if __name__ == "__main__":
    main()