import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
import copy
from PIL import Image
import torch.nn as nn
import pandas as pd
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities.gen_utilities import free_gpu_resources
from my_model.state_manager import StateManager


class InferenceRunner(StateManager):
    """Streamlit runner for the KB-VQA demo: handles model loading and the interactive image Q&A UI."""

    def __init__(self):
        super().__init__()
        self.initialize_state()
        self.sample_images = [
            "Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg"]

    def answer_question(self, caption, detected_objects_str, question, model):
        """Generate an answer to `question` from the image caption and the detected-objects string."""
        free_gpu_resources()
        answer = model.generate_answer(question, caption, detected_objects_str)
        free_gpu_resources()
        return answer


    def image_qa_app(self, kbvqa):
        """Render the image Q&A interface: sample thumbnails, an uploader, and per-image analysis and Q&A."""
        # Display sample images as clickable thumbnails
        self.col1.write("Choose from sample images:")
        cols = self.col1.columns(len(self.sample_images))
        for idx, sample_image_path in enumerate(self.sample_images):
            with cols[idx]:
                image = Image.open(sample_image_path)
                image_for_display = self.resize_image(sample_image_path, 80, 80)
                st.image(image_for_display)
                if st.button(f'Select Sample Image {idx + 1}', key=f'sample_{idx}'):
                    self.process_new_image(sample_image_path, image, kbvqa)

        # Image uploader
        uploaded_image = self.col1.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
        if uploaded_image is not None:
            self.process_new_image(uploaded_image.name, Image.open(uploaded_image), kbvqa)

        # Display and interact with each uploaded/selected image
        with self.col2:
            for image_key, image_data in self.get_images_data().items():
                with st.container():
                    nested_col21, nested_col22 = st.columns([0.65, 0.35])
                    image_for_display = self.resize_image(image_data['image'], 600)
                    nested_col21.image(image_for_display, caption=f'Uploaded Image: {image_key[-11:]}')
                    if not image_data['analysis_done']:
                        nested_col22.text("Please click 'Analyze Image' to generate the caption and detect objects.")
                        with nested_col22:
                            if st.button('Analyze Image', key=f'analyze_{image_key}', on_click=self.disable_widgets, disabled=self.is_widget_disabled):
                                caption, detected_objects_str, image_with_boxes = self.analyze_image(image_data['image'], kbvqa)
                                self.update_image_data(image_key, caption, detected_objects_str, True)
                                st.session_state['loading_in_progress'] = False

        
                    # Initialize qa_history for each image
                    qa_history = image_data.get('qa_history', [])
        
                    if image_data['analysis_done']:
                        st.session_state['loading_in_progress'] = False
                        question = nested_col22.text_input(f"Ask a question about this image ({image_key[-11:]}):", key=f'question_{image_key}')
                        if nested_col22.button('Get Answer', key=f'answer_{image_key}', on_click=self.disable_widgets, disabled=self.is_widget_disabled):
                            if question not in [q for q, _ in qa_history]:
                                answer = self.answer_question(image_data['caption'], image_data['detected_objects_str'], question, kbvqa)
                                st.session_state['loading_in_progress'] = False
                                self.add_to_qa_history(image_key, question, answer)
                            else:
                                nested_col22.warning("This question has already been answered.")
                                
                            st.session_state['loading_in_progress'] = False
        
                    # Display Q&A history for each image
                    for q, a in qa_history:
                        nested_col22.text(f"Q: {q}\nA: {a}\n")

    def display_message(self, message, warning=False, write=False, text=False):
        """Intentionally a no-op in this runner; messages are shown directly through the widgets above."""
        pass

        
    def run_inference(self):
        """Set up the UI widgets, handle model loading/reloading, and launch the image Q&A app."""
        self.set_up_widgets()
        load_fine_tuned_model = False
        fine_tuned_model_already_loaded = False
        reload_detection_model = False
        force_reload_full_model = False
        
        st.session_state['settings_changed'] = self.has_state_changed()
        if st.session_state['settings_changed']:
            self.col1.warning("Model settings have changed, please reload the model, this will take a second .. ")
           

        st.session_state.button_label = "Reload Model" if self.is_model_loaded() and self.settings_changed else "Load Model"

        with self.col1:
                
            if st.session_state.method == "Fine-Tuned Model":
                
                with st.container():
                    nested_col11, nested_col12 = st.columns([0.5, 0.5])
                    if nested_col11.button(st.session_state.button_label, on_click=self.disable_widgets, disabled=self.is_widget_disabled):
                        if st.session_state.button_label == "Load Model":
                            if self.is_model_loaded():
                                free_gpu_resources()
                                fine_tuned_model_already_loaded = True
                      
                            else:
                                load_fine_tuned_model = True
                        else:
                            reload_detection_model = True
                            
                    if nested_col12.button("Force Reload", on_click=self.disable_widgets, disabled=self.is_widget_disabled):
                        force_reload_full_model = True

                if load_fine_tuned_model:
                    free_gpu_resources()
                    self.load_model()
                    st.session_state['loading_in_progress'] = False
                    
                elif fine_tuned_model_already_loaded:
                    free_gpu_resources()
                    self.col1.text("Model already loaded and no settings were changed:)")
                    st.session_state['loading_in_progress'] = False
                    
                elif reload_detection_model:
                    free_gpu_resources()
                    self.reload_detection_model()
                    st.session_state['loading_in_progress'] = False
                    
                elif force_reload_full_model:
                    free_gpu_resources()
                    self.force_reload_model()
                    st.session_state['loading_in_progress'] = False
                
            elif st.session_state.method == "In-Context Learning (n-shots)":
                self.col1.warning(f'The model using {st.session_state.method} is not deployed yet; it will be available later.')
                st.session_state['loading_in_progress'] = False

        
        if self.is_model_loaded():
            st.session_state['loading_in_progress'] = False
            free_gpu_resources()
            self.image_qa_app(self.get_model())
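

if __name__ == "__main__":
    # Usage sketch (an assumption, not confirmed by this file): the repository may
    # instead launch the app from a separate entry script that imports
    # InferenceRunner. This minimal guard simply shows how the runner is driven
    # when the module is executed directly with `streamlit run`.
    runner = InferenceRunner()
    runner.run_inference()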