import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
from PIL import Image
import torch.nn as nn
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources
# Placeholder implementations. load_caption_model and answer_question are not yet
# implemented elsewhere; get_caption and free_gpu_resources deliberately override the
# imports above until the real models are wired up. A sketch of what a real backing
# model could look like follows below.
def load_caption_model():
    st.write("Placeholder for load_caption_model function")
    return None, None

def answer_question(image, question, model=None, processor=None):
    return "Placeholder answer for the question"

def get_caption(image):
    return "Generated caption for the image"

def free_gpu_resources():
    pass
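# Illustrative sketch only: one way the placeholders above could be backed by a real
# VQA model. This assumes the public ViLT VQA checkpoint from the `transformers`
# library as a stand-in; the project's own model in `my_model` may differ. Neither
# function is wired into the app below. (With a recent Streamlit, the loader could
# also be wrapped in @st.cache_resource so the model is not reloaded on every rerun.)
def load_vqa_model_sketch():
    from transformers import ViltProcessor, ViltForQuestionAnswering
    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    return model, processor

def answer_question_sketch(image, question, model, processor):
    # Encode the image + question, run the classifier head, and map the top logit
    # back to its answer string.
    inputs = processor(image, question, return_tensors="pt")
    outputs = model(**inputs)
    best_idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[best_idx]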
# Sample images bundled with the app
sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",
                 "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg",
                 "Files/sample7.jpg"]
def run_inference():
    st.title("Run Inference")
    image_qa_and_object_detection()

def image_qa_and_object_detection():
    # Image-based Q&A functionality
    st.subheader("Talk to your image")
    image_qa_app()

    # Object Detection functionality
    st.subheader("Object Detection")
    object_detection_app()
def image_qa_app():
    # Initialize session state for storing the current image and its Q&A history
    if 'current_image' not in st.session_state:
        st.session_state['current_image'] = None
    if 'qa_history' not in st.session_state:
        st.session_state['qa_history'] = []

    # Display sample images as selectable thumbnails
    st.write("Choose from sample images:")
    cols = st.columns(len(sample_images))
    for idx, sample_image_path in enumerate(sample_images):
        with cols[idx]:
            image = Image.open(sample_image_path)
            st.image(image, use_column_width=True)
            # st.image is not clickable, so each thumbnail gets its own select button
            if st.button("Select", key=f"sample_{idx}"):
                st.session_state['current_image'] = image
                st.session_state['qa_history'] = []

    # Image uploader
    uploaded_image = st.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        st.session_state['current_image'] = Image.open(uploaded_image)
        st.session_state['qa_history'] = []

    # Display the current image and its Q&A interface
    if st.session_state['current_image'] is not None:
        st.image(st.session_state['current_image'], caption='Selected Image', use_column_width=True)

        # Question input
        question = st.text_input("Ask a question about this image:")

        # Get Answer button
        if st.button('Get Answer'):
            # Process the question and store the Q&A pair
            answer = answer_question(st.session_state['current_image'], question)
            st.session_state['qa_history'].append((question, answer))

        # Display all Q&A pairs for the current image
        for q, a in st.session_state['qa_history']:
            st.text(f"Q: {q}\nA: {a}\n")
# Object Detection App
def object_detection_app():
    # ... Implement your code for object detection ...
    pass
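# Illustrative sketch only: one way object_detection_app could be filled in using the
# detect_and_draw_objects helper imported above. Its exact signature is assumed here
# (image in, annotated image plus a list of detections out) and may differ in
# my_model.object_detection. This sketch is not called by the app.
def object_detection_app_sketch():
    uploaded = st.file_uploader("Upload an image for detection", type=["png", "jpg", "jpeg"])
    if uploaded is not None:
        image = Image.open(uploaded)
        st.image(image, caption="Input Image", use_column_width=True)
        if st.button("Detect Objects"):
            # Assumed return value: (annotated PIL image, list of detections)
            annotated, detections = detect_and_draw_objects(image)
            st.image(annotated, caption="Detected Objects", use_column_width=True)
            st.write(detections)
            free_gpu_resources()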
# Main function
def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Evaluation Results", "Run Inference", "Dissertation Report", "Object Detection"])

    if selection == "Home":
        st.title("MultiModal Learning for Knowledge-Based Visual Question Answering")
        st.write("Home page content goes here...")

    elif selection == "Dissertation Report":
        st.title("Dissertation Report")
        st.write("Click the link below to view the PDF.")
        # Offer the report PDF as a download
        with open("Files/Dissertation Report.pdf", "rb") as pdf_file:
            st.download_button(
                label="Download PDF",
                data=pdf_file.read(),
                file_name="Dissertation Report.pdf",
                mime="application/pdf"
            )

    elif selection == "Evaluation Results":
        st.title("Evaluation Results")
        st.write("This is a placeholder until the contents are uploaded.")

    elif selection == "Dataset Analysis":
        st.title("OK-VQA Dataset Analysis")
        st.write("This is a placeholder until the contents are uploaded.")

    elif selection == "Run Inference":
        run_inference()

    elif selection == "Object Detection":
        st.title("Object Detection")
        object_detection_app()

if __name__ == "__main__":
    main()