import os
import base64
import gradio as gr
from PIL import Image
import io
from groq import Groq
import logging
import cv2
import numpy as np
import traceback
from datetime import datetime
import tempfile

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Load environment variables
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY is not set in environment variables")
    raise ValueError("GROQ_API_KEY is not set")
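
# The key must be exported in the environment before launching the app, e.g.
# (illustrative value, not a real key):
#   export GROQ_API_KEY="gsk_your_key_here"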

# Initialize Groq client
client = Groq(api_key=GROQ_API_KEY)

def encode_image(image):
    try:
        if isinstance(image, str):  # If image is a file path
            with open(image, "rb") as image_file:
                return base64.b64encode(image_file.read()).decode('utf-8')
        elif isinstance(image, Image.Image):  # If image is a PIL Image
            buffered = io.BytesIO()
            image.save(buffered, format="PNG")
            return base64.b64encode(buffered.getvalue()).decode('utf-8')
        elif isinstance(image, np.ndarray):  # If image is a numpy array (from video)
            is_success, buffer = cv2.imencode(".png", image)
            if not is_success:
                raise ValueError("Failed to encode numpy frame as PNG")
            return base64.b64encode(buffer).decode('utf-8')
        else:
            raise ValueError(f"Unsupported image type: {type(image)}")
    except Exception as e:
        logger.error(f"Error encoding image: {str(e)}")
        raise
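
# Illustrative usage of encode_image (paths and values are hypothetical):
#   encode_image("photo.png")                      # file path on disk
#   encode_image(Image.open("photo.png"))          # PIL image
#   encode_image(np.zeros((8, 8, 3), np.uint8))    # OpenCV/numpy frame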

def resize_image(image, max_size=(800, 800)):
    """Resize image to avoid exceeding the API size limits."""
    try:
        image.thumbnail(max_size, Image.Resampling.LANCZOS)  # Use LANCZOS resampling for better quality
        return image
    except Exception as e:
        logger.error(f"Error resizing image: {str(e)}")
        raise
        
def extract_frames_from_video(video, frame_points=(0, 0.5, 1), max_size=(800, 800)):
    """Extract key frames from the video at the given fractions of its duration."""
    cap = cv2.VideoCapture(video)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:  # Guard against unreadable metadata and division by zero
        cap.release()
        raise ValueError(f"Could not read frame rate from video: {video}")
    duration = frame_count / fps

    frames = []
    for time_point in frame_points:
        cap.set(cv2.CAP_PROP_POS_MSEC, time_point * duration * 1000)
        ret, frame = cap.read()
        if ret:
            # Scale down while preserving aspect ratio instead of forcing
            # the frame to exactly max_size
            h, w = frame.shape[:2]
            scale = min(max_size[0] / w, max_size[1] / h, 1.0)
            frames.append(cv2.resize(frame, (int(w * scale), int(h * scale))))
    cap.release()
    return frames
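
# Sampling semantics: frame_points are fractions of the clip duration, so
# (0, 0.5, 1) samples the opening, middle, and closing frames; for a 60 s
# video that is roughly 0 s, 30 s, and 60 s.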

def analyze_file(file):
    """Analyze a single file (image or video)."""
    try:
        # Depending on the Gradio version, file is either a plain path string
        # or a tempfile-like object with a .name attribute
        path = file if isinstance(file, str) else file.name
        file_type = path.split('.')[-1].lower()
        if file_type in ['jpg', 'jpeg', 'png', 'bmp']:
            return analyze_image(path)
        elif file_type in ['mp4', 'avi', 'mov', 'webm']:
            return analyze_video(path)
        else:
            return "Unsupported file type. Please upload an image or video file."
    except Exception as e:
        logger.error(f"Error analyzing file: {str(e)}")
        return f"Error analyzing file: {str(e)}"

def analyze_image(image_path):
    image = Image.open(image_path)
    resized_image = resize_image(image)
    image_data_url = f"data:image/png;base64,{encode_image(resized_image)}"
    
    instruction = ("You are an AI assistant specialized in analyzing images for safety issues. "
                   "Your task is first to explain what you see in the image and determine whether it shows a construction site. "
                   "If it does, identify any safety issues or hazards, categorize them, provide a detailed description, "
                   "and suggest steps to resolve them. If it is not a construction site, simply state that.")

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": f"{instruction}\n\nAnalyze this image. First, determine if it's a construction site. If it is, explain the image in detail, focusing on safety aspects. If it's not, briefly describe what you see."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": image_data_url
                    }
                }
            ]
        }
    ]
    
    completion = client.chat.completions.create(
        model="llama-3.2-90b-vision-preview",
        messages=messages,
        temperature=0.7,
        max_tokens=1000,
        top_p=1,
        stream=False,
        stop=None
    )
    
    return completion.choices[0].message.content

def analyze_video(video_path):
    frames = extract_frames_from_video(video_path)
    results = []
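    # Each frame gets its own vision completion, so the default three-point
    # sampling in extract_frames_from_video means three API calls per video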
    
    instruction = ("You are an AI assistant specialized in analyzing images for safety issues. "
                   "Your task is first to explain what you see in the image and determine whether it shows a construction site. "
                   "If it does, identify any safety issues or hazards, categorize them, provide a detailed description, "
                   "and suggest steps to resolve them. If it is not a construction site, simply state that.")

    for i, frame in enumerate(frames):
        image_data_url = f"data:image/png;base64,{encode_image(frame)}"
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": f"{instruction}\n\nAnalyze this frame from a video (Frame {i+1}/{len(frames)}). First, describe the frame and determine whether it shows a construction site. If it does, explain what you observe, focusing on safety aspects. If it's not, briefly describe what you see."
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_data_url
                        }
                    }
                ]
            }
        ]
        completion = client.chat.completions.create(
            model="llama-3.2-90b-vision-preview",
            messages=messages,
            temperature=0.7,
            max_tokens=1000,
            top_p=1,
            stream=False,
            stop=None
        )
        results.append(f"Frame {i+1} analysis:\n{completion.choices[0].message.content}\n\n")
    
    return "\n".join(results)


def chat_about_image(message, chat_history):
    try:
        # Prepare the conversation history for the API
        messages = [
            {"role": "system", "content": "You are an AI assistant specialized in analyzing construction site images and answering questions about them. Use the information from the initial analysis to answer user queries."},
        ]
        
        # gr.Chatbot history is a list of (user, assistant) tuples; flatten it
        # into the OpenAI-style message list the Groq API expects
        for human, ai in chat_history:
            if human:
                messages.append({"role": "user", "content": human})
            if ai:
                messages.append({"role": "assistant", "content": ai})
        
        # Add the new user message
        messages.append({"role": "user", "content": message})
        
        # Make API call
        completion = client.chat.completions.create(
            model="llama-3.2-90b-vision-preview",
            messages=messages,
            temperature=0.7,
            max_tokens=500,
            top_p=1,
            stream=False,
            stop=None
        )
        
        response = completion.choices[0].message.content
        chat_history.append((message, response))
        
        return "", chat_history
    except Exception as e:
        logger.error(f"Error during chat: {str(e)}")
        return "", chat_history + [(message, f"Error: {str(e)}")]

def generate_summary_report(chat_history):
    """
    Generate a summary report from the chat history.
    """
    report = "Construction Site Safety Analysis Report\n"
    report += "=" * 40 + "\n"
    report += f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"

    for i, (user, ai) in enumerate(chat_history, 1):
        if user:
            report += f"Query {i}:\n{user}\n\n"
        if ai:
            report += f"Analysis {i}:\n{ai}\n\n"
        report += "-" * 40 + "\n"

    return report

def download_report(chat_history):
    """
    Generate and provide a download link for the summary report.
    """
    report = generate_summary_report(chat_history)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"safety_analysis_report_{timestamp}.txt"

    # Write into a fresh temporary directory so the download keeps the
    # human-readable filename instead of a random tempfile name
    temp_file_path = os.path.join(tempfile.mkdtemp(), filename)
    with open(temp_file_path, "w") as temp_file:
        temp_file.write(report)

    return temp_file_path
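
# Note: the report files are left on disk so Gradio can serve them for
# download; a long-running deployment may want to clean them up periodically.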


# Custom CSS for improved styling
custom_css = """
.container { max-width: 1200px; margin: auto; padding-top: 1.5rem; }
.header { text-align: center; margin-bottom: 1rem; }
.header h1 { color: #2c3e50; font-size: 2.5rem; }
.subheader { 
    color: #34495e; 
    font-size: 1rem; 
    line-height: 1.2; 
    margin-bottom: 1.5rem; 
    text-align: center; 
    padding: 0 15px;
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
}
.image-container { border: 2px dashed #3498db; border-radius: 10px; padding: 1rem; text-align: center; margin-bottom: 1rem; }
.analyze-button { background-color: #2ecc71 !important; color: white !important; width: 100%; }
.clear-button { background-color: #e74c3c !important; color: white !important; width: 100px !important; }
.chatbot { border: 1px solid #bdc3c7; border-radius: 10px; padding: 1rem; height: 500px; overflow-y: auto; }
.chat-input { border: 1px solid #bdc3c7; border-radius: 5px; padding: 0.5rem; width: 100%; }
.groq-badge { position: fixed; bottom: 10px; right: 10px; background-color: #f39c12; color: white; padding: 5px 10px; border-radius: 5px; font-weight: bold; }
.chat-container { display: flex; flex-direction: column; height: 100%; }
.input-row { display: flex; align-items: center; margin-top: 10px; justify-content: space-between; }
.input-row > div:first-child { flex-grow: 1; margin-right: 10px; }
"""

# Create the Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as iface:
    gr.HTML(
        """
        <div class="container">
            <div class="header">
                <h1>πŸ—οΈ Construction Site Safety Analyzer</h1>
            </div>
            <p class="subheader">Enhance workplace safety and compliance with AI-powered image and video analysis using Llama 3.2 90B Vision and expert chat assistance.</p>
        </div>
        """
    )
    
    # First row: Combined file upload for images and videos
    with gr.Row():
        file_input = gr.File(
            label="Upload Construction Site Images or Videos",
            file_count="multiple",
            type="filepath",
            elem_classes="file-container"
        )
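    # Depending on the Gradio version, type="filepath" delivers plain path
    # strings or tempfile-like objects; analyze_file accepts either form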
    
    # Second row: Analyze Safety Hazards Button
    with gr.Row():
        analyze_button = gr.Button("πŸ” Analyze Safety Hazards", elem_classes="analyze-button")
    
    # Third row: Chat Interface (Safety Analysis Results)
    with gr.Row():
        chatbot = gr.Chatbot(
            label="Safety Analysis Results and Expert Chat",
            elem_classes="chatbot",
            show_share_button=False,  # Remove share button
            show_copy_button=False    # Remove copy button
        )
    
    # Fourth row: Question Bar
    with gr.Row():
        msg = gr.Textbox(
            label="Ask about safety measures or regulations",
            placeholder="E.g., 'What OSHA guidelines apply to this hazard?'",
            show_label=False,
            elem_classes="chat-input"
        )

    # Fifth row: Clear Chat and Download Report Buttons
    with gr.Row():
        clear = gr.Button("πŸ—‘οΈ Clear Chat", elem_classes="clear-button")
        download_button = gr.Button("πŸ“₯ Download Report", elem_classes="download-button")

    # Add a file component to handle the download
    report_file = gr.File(label="Download Safety Analysis Report")

    def process_files(files, history):
        """Analyze each uploaded file and append the results to the chat history."""
        history = history or []
        for file in files or []:
            path = file if isinstance(file, str) else file.name
            result = analyze_file(file)
            history.append((None, f"{os.path.basename(path)}\n\n{result}"))
        return history

    # The click event has no output-transform hook (its postprocess parameter
    # is a boolean flag), so the current history is passed in as an input and
    # the updated history is returned directly
    analyze_button.click(
        process_files,
        inputs=[file_input, chatbot],
        outputs=[chatbot]
    )

    msg.submit(chat_about_image, [msg, chatbot], [msg, chatbot])
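    # Returning None resets the Chatbot component to an empty state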
    clear.click(lambda: None, None, chatbot, queue=False)

    download_button.click(
        download_report,
        inputs=[chatbot],
        outputs=[report_file]
    )

    gr.HTML(
        """
        <div class="groq-badge">Powered by Groq</div>
        """
    )

# Launch the app
if __name__ == "__main__":
    try:
        iface.launch(debug=True)
    except Exception as e:
        logger.error(f"Error when trying to launch the interface: {str(e)}")
        logger.error(traceback.format_exc())
        print("Failed to launch the Gradio interface. Please check the logs for more information.")