# RouterFix / app.py
import gradio as gr
import spaces
import json
import markdown
import requests
import torch
from PIL import Image
from transformers import MllamaForConditionalGeneration, AutoProcessor
SCHEMA_DEFINITION = """{
  "$schema": "http://json-schema.org/draft-04/schema#",
  "type": "object",
  "properties": {
    "Issue_Description": {
      "type": "string"
    },
    "Root_Cause_Analysis": {
      "type": "object",
      "properties": {
        "LED_Analysis": {
          "type": "object",
          "properties": {
            "Color": {
              "type": "string"
            },
            "Pattern": {
              "type": "string"
            },
            "Indicates": {
              "type": "string"
            }
          },
          "required": [
            "Color",
            "Pattern",
            "Indicates"
          ]
        },
        "Error_Code": {
          "type": "string"
        },
        "Possible_Cause": {
          "type": "string"
        }
      },
      "required": [
        "LED_Analysis",
        "Error_Code",
        "Possible_Cause"
      ]
    },
    "Step_by_Step_Troubleshooting": {
      "type": "array",
      "items": [
        {
          "type": "object",
          "properties": {
            "Action": {
              "type": "string"
            },
            "Details": {
              "type": "string"
            },
            "Expected Outcome": {
              "type": "string"
            }
          },
          "required": [
            "Action",
            "Details",
            "Expected Outcome"
          ]
        },
        {
          "type": "object",
          "properties": {
            "Action": {
              "type": "string"
            },
            "Details": {
              "type": "string"
            },
            "Expected Outcome": {
              "type": "string"
            }
          },
          "required": [
            "Action",
            "Details",
            "Expected Outcome"
          ]
        },
        {
          "type": "object",
          "properties": {
            "Action": {
              "type": "string"
            },
            "Details": {
              "type": "string"
            },
            "Expected Outcome": {
              "type": "string"
            }
          },
          "required": [
            "Action",
            "Details",
            "Expected Outcome"
          ]
        },
        {
          "type": "object",
          "properties": {
            "Action": {
              "type": "string"
            },
            "Details": {
              "type": "string"
            },
            "Expected Outcome": {
              "type": "string"
            }
          },
          "required": [
            "Action",
            "Details",
            "Expected Outcome"
          ]
        }
      ]
    },
    "Recommended_Actions": {
      "type": "object",
      "properties": {
        "Immediate_Action": {
          "type": "string"
        },
        "If_Unresolved": {
          "type": "string"
        },
        "Preventative_Measure": {
          "type": "string"
        }
      },
      "required": [
        "Immediate_Action",
        "If_Unresolved",
        "Preventative_Measure"
      ]
    }
  },
  "required": [
    "Issue_Description",
    "Root_Cause_Analysis",
    "Step_by_Step_Troubleshooting",
    "Recommended_Actions"
  ]
}"""
SYSTEM_INSTRUCTION="You are a router troubleshooter. Your job is to analyze the provided router image, identify potential issues such as faulty connections, incorrect LED patterns, or error codes, and offer precise troubleshooting steps. Based on your analysis, generate a detailed observation that includes a root cause analysis, step-by-step actions for resolving the issue, and recommended preventive measures. The output must be in JSON format as per the following schema, ensuring users can easily follow and implement the suggested solutions.\n" + SCHEMA_DEFINITION
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)
def extract_assistant_reply(input_string):
    # Define the tag that indicates the start of the assistant's reply
    start_tag = "<|start_header_id|>assistant<|end_header_id|>"
    # Find the position where the assistant's reply starts
    start_index = input_string.find(start_tag)
    if start_index == -1:
        return "Assistant's reply not found."
    start_index += len(start_tag)
    # Extract everything after the start tag and drop the end-of-turn token if present
    assistant_reply = input_string[start_index:].strip()
    assistant_reply = assistant_reply.removesuffix("<|eot_id|>").strip()
    return assistant_reply
def extract_json_from_markdown(markdown_text):
    """Extract and parse a JSON code block from markdown text."""
    try:
        # Find the start and end of the code block (with or without "json")
        start_idx = markdown_text.find('```')
        if start_idx == -1:
            return None
        end_idx = markdown_text.find('```', start_idx + 3)
        # If the block starts with '```json', skip the 'json' part
        if markdown_text[start_idx:start_idx + 7] == '```json':
            start_idx += len('```json')
        else:
            start_idx += len('```')
        # Extract and clean up the code block contents
        json_str = markdown_text[start_idx:end_idx].strip()
        # Try to parse it as JSON
        return json.loads(json_str)
    except Exception as e:
        print(f"Error extracting JSON: {e}")
        return None
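
# Illustrative usage of the helper above (hypothetical input, not a captured model reply):
#   extract_json_from_markdown('```json\n{"Issue_Description": "Power LED is solid red"}\n```')
#   -> {'Issue_Description': 'Power LED is solid red'}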
@spaces.GPU
def diagnose_router(image):
    messages = [
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": SYSTEM_INSTRUCTION}
        ]}
    ]
    input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, input_text, return_tensors="pt").to(model.device)
    # Generate the output from the model
    output = model.generate(**inputs, max_new_tokens=300)
    print(output)
    markdown_text = processor.decode(output[0])
    print(markdown_text)
    # Extract JSON from the markdown text
    # result = extract_json_from_markdown(markdown_text)
    # print(result)
    # Generate HTML content for structured display
    # html_output = f"""
    # <div style="font-family: Arial, sans-serif; color: #333;">
    #     <h2>Router Diagnosis</h2>
    #     <h3>Issue Description</h3>
    #     <p><strong>{result['Issue_Description']}</strong></p>
    #     <h3>Root Cause Analysis</h3>
    #     <ul>
    #         <li><strong>LED Color:</strong> {result['Root_Cause_Analysis']['LED_Analysis']['Color']}</li>
    #         <li><strong>LED Pattern:</strong> {result['Root_Cause_Analysis']['LED_Analysis']['Pattern']}</li>
    #         <li><strong>Indicates:</strong> {result['Root_Cause_Analysis']['LED_Analysis']['Indicates']}</li>
    #         <li><strong>Error Code:</strong> {result['Root_Cause_Analysis']['Error_Code']}</li>
    #         <li><strong>Possible Cause:</strong> {result['Root_Cause_Analysis']['Possible_Cause']}</li>
    #     </ul>
    #     <h3>Step-by-Step Troubleshooting</h3>
    #     <ol>
    # """
    # # Loop through each step in the troubleshooting process (now a list)
    # for step in result["Step_by_Step_Troubleshooting"]:
    #     html_output += f"""
    #     <li><strong>{step['Action']}</strong>: {step['Details']}<br/>
    #     <em>Expected Outcome:</em> {step['Expected Outcome']}</li>
    #     """
    # # Adding the Recommended Actions section
    # html_output += f"""
    #     </ol>
    #     <h3>Recommended Actions</h3>
    #     <ul>
    #         <li><strong>Immediate Action:</strong> {result['Recommended_Actions']['Immediate_Action']}</li>
    #         <li><strong>If Unresolved:</strong> {result['Recommended_Actions']['If_Unresolved']}</li>
    #         <li><strong>Preventative Measure:</strong> {result['Recommended_Actions']['Preventative_Measure']}</li>
    #     </ul>
    # </div>
    # """
    markdown_text = extract_assistant_reply(markdown_text)
    html_output = markdown.markdown(markdown_text)
    return html_output
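
# Local smoke test (a sketch, assuming a "router.jpg" image sits next to this file and a
# CUDA-capable GPU is available; outside a Hugging Face Space the @spaces.GPU decorator
# should have no effect):
#   print(diagnose_router(Image.open("router.jpg")))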
# Gradio UI
interface = gr.Interface(
    fn=diagnose_router,
    inputs=gr.Image(type="pil", label="Upload an image of the faulty router"),
    outputs=gr.HTML(),
    title="Router Diagnosis",
    description="Upload a photo of your router to receive a professional diagnosis and troubleshooting steps displayed in a structured, easy-to-read format."
)
# Launch the UI
interface.launch()