import json

import gradio as gr
import spaces
import markdown
import requests
import torch
from PIL import Image
from transformers import MllamaForConditionalGeneration, AutoProcessor

# JSON Schema (draft-04) that the model's diagnosis is asked to follow.
SCHEMA_DEFINITION = """{
  "$schema": "http://json-schema.org/draft-04/schema#",
  "type": "object",
  "properties": {
    "Issue_Description": {"type": "string"},
    "Root_Cause_Analysis": {
      "type": "object",
      "properties": {
        "LED_Analysis": {
          "type": "object",
          "properties": {
            "Color": {"type": "string"},
            "Pattern": {"type": "string"},
            "Indicates": {"type": "string"}
          },
          "required": ["Color", "Pattern", "Indicates"]
        },
        "Error_Code": {"type": "string"},
        "Possible_Cause": {"type": "string"}
      },
      "required": ["LED_Analysis", "Error_Code", "Possible_Cause"]
    },
    "Step_by_Step_Troubleshooting": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "Action": {"type": "string"},
          "Details": {"type": "string"},
          "Expected Outcome": {"type": "string"}
        },
        "required": ["Action", "Details", "Expected Outcome"]
      }
    },
    "Recommended_Actions": {
      "type": "object",
      "properties": {
        "Immediate_Action": {"type": "string"},
        "If_Unresolved": {"type": "string"},
        "Preventative_Measure": {"type": "string"}
      },
      "required": ["Immediate_Action", "If_Unresolved", "Preventative_Measure"]
    }
  },
  "required": [
    "Issue_Description",
    "Root_Cause_Analysis",
    "Step_by_Step_Troubleshooting",
    "Recommended_Actions"
  ]
}"""

SYSTEM_INSTRUCTION = "You are a router troubleshooter. Your job is to analyze the provided router image, identify potential issues such as faulty connections, incorrect LED patterns, or error codes, and offer precise troubleshooting steps. Based on your analysis, generate a detailed observation that includes a root cause analysis, step-by-step actions for resolving the issue, and recommended preventive measures. The output must be in JSON format as per the following schema, ensuring users can easily follow and implement the suggested solutions.\n" + SCHEMA_DEFINITION

# Load the Llama 3.2 Vision instruct model and its processor.
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"

model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)


def extract_assistant_reply(input_string):
    """Return only the assistant's turn from the decoded chat transcript."""
    start_tag = "<|start_header_id|>assistant<|end_header_id|>"

    start_index = input_string.find(start_tag)
    if start_index == -1:
        return "Assistant's reply not found."
    start_index += len(start_tag)

    # Drop the trailing end-of-turn token that processor.decode() leaves in place.
    assistant_reply = input_string[start_index:].replace("<|eot_id|>", "").strip()
    return assistant_reply
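
# For reference, the decoded generation roughly follows the Llama 3.x chat
# layout sketched below (schematic only; the reply text is a placeholder):
#
#   <|begin_of_text|><|start_header_id|>user<|end_header_id|> ... <|eot_id|>
#   <|start_header_id|>assistant<|end_header_id|>
#   ...model's JSON/markdown answer...<|eot_id|>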


def extract_json_from_markdown(markdown_text):
    """Extract and parse a JSON payload from a fenced code block in markdown text."""
    try:
        start_idx = markdown_text.find('```')
        end_idx = markdown_text.find('```', start_idx + 3)
        if start_idx == -1 or end_idx == -1:
            return None

        # Skip past the opening fence (and its optional "json" language tag).
        if markdown_text[start_idx:start_idx + 7] == '```json':
            start_idx += len('```json')
        else:
            start_idx += len('```')

        json_str = markdown_text[start_idx:end_idx].strip()
        return json.loads(json_str)
    except Exception as e:
        print(f"Error extracting JSON: {e}")
        return None
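
# Illustrative usage of the helper above, assuming the model wraps its answer
# in a fenced ```json block (the reply string here is a made-up example):
#
#   reply = 'Diagnosis:\n```json\n{"Issue_Description": "No internet"}\n```'
#   extract_json_from_markdown(reply)   # -> {'Issue_Description': 'No internet'}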


@spaces.GPU
def diagnose_router(image):
    # Single user turn containing the uploaded image plus the instruction/schema prompt.
    messages = [
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": SYSTEM_INSTRUCTION}
        ]}
    ]
    input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, input_text, return_tensors="pt").to(model.device)

    # Note: 300 new tokens can truncate a long JSON answer; raise if needed.
    output = model.generate(**inputs, max_new_tokens=300)
    print(output)
    markdown_text = processor.decode(output[0])
    print(markdown_text)
    # Keep only the assistant's reply and render its markdown as HTML for Gradio.
    markdown_text = extract_assistant_reply(markdown_text)
    html_output = markdown.markdown(markdown_text)
    return html_output


interface = gr.Interface(
    fn=diagnose_router,
    inputs=gr.Image(type="pil", label="Upload an image of the faulty router"),
    outputs=gr.HTML(),
    title="Router Diagnosis",
    description="Upload a photo of your router to receive a professional diagnosis and troubleshooting steps displayed in a structured, easy-to-read format.",
)

interface.launch()