capradeepgujaran committed • Commit 00759b9
1 Parent(s): c8bc392
Update app.py

app.py CHANGED
@@ -6,6 +6,8 @@ import io
 import json
 from groq import Groq
 import logging
+import cv2
+import numpy as np
 
 # Set up logging
 logging.basicConfig(level=logging.DEBUG)
@@ -29,68 +31,106 @@ def encode_image(image):
             buffered = io.BytesIO()
             image.save(buffered, format="PNG")
             return base64.b64encode(buffered.getvalue()).decode('utf-8')
+        elif isinstance(image, np.ndarray):  # If image is a numpy array (from video)
+            is_success, buffer = cv2.imencode(".png", image)
+            if is_success:
+                return base64.b64encode(buffer).decode('utf-8')
         else:
             raise ValueError(f"Unsupported image type: {type(image)}")
     except Exception as e:
         logger.error(f"Error encoding image: {str(e)}")
         raise
 
-def analyze_construction_image(
-    if
-        logger.warning("No
-        return [("No
+def analyze_construction_image(images, video=None):
+    if not images and video is None:
+        logger.warning("No images or video provided")
+        return [("No input", "Error: Please upload images or a video for analysis.")]
 
     try:
-        logger.info("Starting
-        logger.debug("Image encoded successfully")
+        logger.info("Starting analysis")
+        results = []
 
-                "type": "text",
-                "text": "Analyze this construction site image. Identify any safety issues or hazards, categorize them, provide a detailed description, and suggest steps to resolve them."
-            },
+        if images:
+            for i, image in enumerate(images):
+                image_data_url = f"data:image/png;base64,{encode_image(image)}"
+                messages = [
                     {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": f"Analyze this construction site image (Image {i+1}/{len(images)}). Identify any safety issues or hazards, categorize them, provide a detailed description, and suggest steps to resolve them."
+                            },
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": image_data_url
+                                }
+                            }
+                        ]
                     }
                 ]
+                completion = client.chat.completions.create(
+                    model="llama-3.2-90b-vision-preview",
+                    messages=messages,
+                    temperature=0.7,
+                    max_tokens=1000,
+                    top_p=1,
+                    stream=False,
+                    stop=None
+                )
+                result = completion.choices[0].message.content
+                results.append((f"Image {i+1} analysis", result))
+
+        if video:
+            cap = cv2.VideoCapture(video.name)
+            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            fps = int(cap.get(cv2.CAP_PROP_FPS))
+            duration = frame_count / fps
+
+            # Analyze frames at 0%, 25%, 50%, 75%, and 100% of the video duration
+            for i, time_point in enumerate([0, 0.25, 0.5, 0.75, 1]):
+                cap.set(cv2.CAP_PROP_POS_MSEC, time_point * duration * 1000)
+                ret, frame = cap.read()
+                if ret:
+                    image_data_url = f"data:image/png;base64,{encode_image(frame)}"
+                    messages = [
+                        {
+                            "role": "user",
+                            "content": [
+                                {
+                                    "type": "text",
+                                    "text": f"Analyze this frame from a construction site video (Frame {i+1}/5 at {time_point*100}% of video duration). Identify any safety issues or hazards, categorize them, provide a detailed description, and suggest steps to resolve them."
+                                },
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": image_data_url
+                                    }
+                                }
+                            ]
+                        }
+                    ]
+                    completion = client.chat.completions.create(
+                        model="llama-3.2-90b-vision-preview",
+                        messages=messages,
+                        temperature=0.7,
+                        max_tokens=1000,
+                        top_p=1,
+                        stream=False,
+                        stop=None
+                    )
+                    result = completion.choices[0].message.content
+                    results.append((f"Video frame {i+1} analysis", result))
+            cap.release()
 
         logger.info("Analysis completed successfully")
-        # Return the result directly, without any parsing
-        return [("Image analysis request", result)]
+        return results
     except Exception as e:
-        logger.error(f"Error during
+        logger.error(f"Error during analysis: {str(e)}")
         logger.error(traceback.format_exc())
         error_message = f"Error during analysis: {str(e)}. Please try again or contact support if the issue persists."
-        return [("
+        return [("Analysis error", error_message)]
 
 def chat_about_image(message, chat_history):
     try:
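For reference, a minimal standalone sketch of the frame-sampling pattern used in analyze_construction_image above: seek to fixed fractions of the video's duration with OpenCV, grab a frame at each point, and encode it as a base64 PNG data URL. The function name and the video path below are illustrative, not part of the commit.

import base64
import cv2

def sample_frames_as_data_urls(video_path, fractions=(0, 0.25, 0.5, 0.75, 1)):
    """Return one PNG data URL per sampled point of the video."""
    cap = cv2.VideoCapture(video_path)
    frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    duration_ms = (frame_count / fps) * 1000 if fps else 0

    data_urls = []
    for fraction in fractions:
        # Seek by timestamp (milliseconds), then grab the nearest frame.
        cap.set(cv2.CAP_PROP_POS_MSEC, fraction * duration_ms)
        ret, frame = cap.read()
        if not ret:
            continue  # seeking to 100% can land past the last decodable frame
        ok, png_buffer = cv2.imencode(".png", frame)
        if ok:
            encoded = base64.b64encode(png_buffer).decode("utf-8")
            data_urls.append(f"data:image/png;base64,{encoded}")
    cap.release()
    return data_urls

if __name__ == "__main__":
    urls = sample_frames_as_data_urls("site_walkthrough.mp4")  # placeholder path
    print(f"Encoded {len(urls)} frames")

Seeking by CAP_PROP_POS_MSEC keeps the sampling independent of frame rate, which is why the handler above computes the duration from frame count and FPS before the loop.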
@@ -164,14 +204,15 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as iface:
         <div class="header">
             <h1>🏗️ Construction Site Safety Analyzer</h1>
         </div>
-        <p class="subheader">Enhance workplace safety and compliance with AI-powered image analysis using Llama 3.2 90B Vision and expert chat assistance.</p>
+        <p class="subheader">Enhance workplace safety and compliance with AI-powered image and video analysis using Llama 3.2 90B Vision and expert chat assistance.</p>
     </div>
     """
     )
 
     with gr.Row():
         with gr.Column(scale=1):
-            image_input = gr.
+            image_input = gr.File(label="Upload Construction Site Images", file_count="multiple", type="file", elem_classes="image-container")
+            video_input = gr.Video(label="Upload Construction Site Video", elem_classes="image-container")
             analyze_button = gr.Button("🔍 Analyze Safety Hazards", elem_classes="analyze-button")
         with gr.Column(scale=2):
             with gr.Group(elem_classes="chat-container"):
@@ -185,16 +226,16 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as iface:
            )
            clear = gr.Button("🗑️ Clear", elem_classes="clear-button")
 
-    def update_chat(history,
+    def update_chat(history, new_messages):
         history = history or []
-        history.
+        history.extend(new_messages)
         return history
 
     analyze_button.click(
         analyze_construction_image,
-        inputs=[image_input],
+        inputs=[image_input, video_input],
         outputs=[chatbot],
-        postprocess=lambda x: update_chat(chatbot.value, x
+        postprocess=lambda x: update_chat(chatbot.value, x)
     )
 
     msg.submit(chat_about_image, [msg, chatbot], [msg, chatbot])
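As a rough, self-contained illustration of the UI wiring in the two Gradio hunks above, the sketch below (assuming Gradio 3.x, where gr.Chatbot accepts a list of (user, assistant) tuples) feeds multiple uploaded files plus an optional video into one handler and shows the returned pairs in the chat panel; analyze_all is a stub standing in for the real analyzer.

import gradio as gr

def analyze_all(files, video):
    # Placeholder analysis: one (title, text) pair per input, mirroring the
    # tuples the commit's handler returns.
    results = []
    for i, f in enumerate(files or []):
        results.append((f"Image {i + 1} analysis", f"Analyzed {f.name}"))
    if video:
        results.append(("Video analysis", f"Analyzed {video}"))
    return results or [("No input", "Please upload images or a video.")]

with gr.Blocks() as demo:
    image_input = gr.File(label="Images", file_count="multiple")
    video_input = gr.Video(label="Video")
    chatbot = gr.Chatbot(label="Results")
    analyze_button = gr.Button("Analyze")

    # The handler's list of (user, assistant) tuples becomes the Chatbot value directly.
    analyze_button.click(analyze_all, inputs=[image_input, video_input], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()

Returning the tuple list this way replaces the Chatbot contents; to append to existing history instead, the chatbot would also be passed as an input and extended inside the handler, much as the commit's update_chat helper does with history.extend.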