import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
import copy
from PIL import Image
import torch.nn as nn
import pandas as pd
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.gen_utilities import free_gpu_resources
from my_model.KBVQA import KBVQA, prepare_kbvqa_model
from my_model.utilities.state_manager import StateManager

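# Module-level StateManager used throughout this page to manage uploaded images,
# model loading, and the widget/session state.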
state_manager = StateManager()

def answer_question(caption, detected_objects_str, question, model):
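    """
    Generate an answer to `question` about an image, using its caption and the
    detected-objects string as context for the KBVQA model. GPU memory is freed
    before and after generation.
    """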
    free_gpu_resources()
    answer = model.generate_answer(question, caption, detected_objects_str)
    free_gpu_resources()
    return answer


# Paths to the sample images (relative to the app's working directory)
sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg", 
                 "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg", 
                 "Files/sample7.jpg"]




def image_qa_app(kbvqa):
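    """
    Interactive image Q&A interface: shows clickable sample-image thumbnails and
    an uploader, runs captioning and object detection on demand for each image,
    and lets the user ask questions, keeping a Q&A history per analyzed image.
    """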
    # Display sample images as clickable thumbnails
    st.write("Choose from sample images:")
    cols = st.columns(len(sample_images))
    for idx, sample_image_path in enumerate(sample_images):
        with cols[idx]:
            image = Image.open(sample_image_path)
            st.image(image, use_column_width=True)
            if st.button(f'Select Sample Image {idx + 1}', key=f'sample_{idx}'):
                state_manager.process_new_image(sample_image_path, image, kbvqa)

    # Image uploader
    uploaded_image = st.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        state_manager.process_new_image(uploaded_image.name, Image.open(uploaded_image), kbvqa)

    # Display and interact with each uploaded/selected image
    for image_key, image_data in state_manager.get_images_data().items():
        st.image(image_data['image'], caption=f'Uploaded Image: {image_key[-11:]}', use_column_width=True)
        if not image_data['analysis_done']:
            st.text("Cool image, please click 'Analyze Image'..")
            if st.button('Analyze Image', key=f'analyze_{image_key}'):
                caption, detected_objects_str, image_with_boxes = state_manager.analyze_image(image_data['image'], kbvqa)
                state_manager.update_image_data(image_key, caption, detected_objects_str, True)

        # Initialize qa_history for each image
        qa_history = image_data.get('qa_history', [])

        if image_data['analysis_done']:
            question = st.text_input(f"Ask a question about this image ({image_key[-11:]}):", key=f'question_{image_key}')
            if st.button('Get Answer', key=f'answer_{image_key}'):
                if question not in [q for q, _ in qa_history]:
                    answer = answer_question(image_data['caption'], image_data['detected_objects_str'], question, kbvqa)
                    state_manager.add_to_qa_history(image_key, question, answer)

        # Display Q&A history for each image
        for q, a in qa_history:
            st.text(f"Q: {q}\nA: {a}\n")





def run_inference():
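    """
    Render the "Run Inference" page: initialize session state, draw the settings
    widgets, load or reload the KBVQA model when the settings change, and start
    the image Q&A interface once all models are loaded.
    """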
    
    st.title("Run Inference")
    state_manager.initialize_state()
    state_manager.set_up_widgets()
    st.session_state['settings_changed'] = state_manager.has_state_changed()
    if st.session_state['settings_changed']:
        st.warning("Model settings have changed, please reload the model, this will take a second .. ")

    # Offer "Reload Model" only when a model is already loaded and its settings have changed.
    st.session_state.button_label = ("Reload Model" if state_manager.is_model_loaded() and state_manager.settings_changed
                                     else "Load Model")
    state_manager.display_model_settings()
    state_manager.display_session_state()
    
    
    if st.session_state.method == "Fine-Tuned Model":
        if st.button(st.session_state.button_label):
            if st.session_state.button_label == "Load Model":
                if state_manager.is_model_loaded():
                    st.text("Model is already loaded and no settings were changed :)")
                else:
                    state_manager.load_model()
            else:
                state_manager.reload_detection_model()

        if state_manager.is_model_loaded() and st.session_state.kbvqa.all_models_loaded:
            image_qa_app(state_manager.get_model())

    else:
        st.write(f'Model using {st.session_state.method} is not deployed yet; it will be available later.')
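

# Note: this module only defines run_inference(); it is expected to be imported
# and called from the app's main Streamlit script, for example:
#
#     from inference_page import run_inference   # module name here is hypothetical
#     run_inference()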