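"""Hospital query assistant built with AutoGen agents on Groq-hosted models.

User input is sanitized and run through an LLM-based moderation check before
the assistant answers; token usage is tracked per query.
"""
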
import os
import autogen
from groq import Groq
from dotenv import load_dotenv
from typing import Dict, Any
import logging
import re

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load environment variables
load_dotenv()

# Initialize GroqCloud client and agents
groq_client = None
GROQ_API_KEY = None
user_proxy = None
hospital_assistant = None

def set_groq_api_key(api_key: str):
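    """Store the Groq API key, create the Groq client, and build the AutoGen agents."""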
    global groq_client, GROQ_API_KEY, user_proxy, hospital_assistant
    GROQ_API_KEY = api_key
    groq_client = Groq(api_key=GROQ_API_KEY)
    os.environ["OPENAI_API_KEY"] = GROQ_API_KEY  # Set the environment variable for OpenAI compatibility

    # Configuration for AutoGen agents
    config_list = [
        {
            "model": "mixtral-8x7b-32768",
            "api_key": GROQ_API_KEY,
            # Groq exposes an OpenAI-compatible endpoint, so base_url belongs in the config entry.
            "base_url": "https://api.groq.com/openai/v1",
        }
    ]

    llm_config = {
        "config_list": config_list,
        "cache_seed": 42,
        "temperature": 0.5,
    }

    # Disable Docker for code execution
    code_execution_config = {"use_docker": False}

    # Create User Proxy Agent
    user_proxy = autogen.UserProxyAgent(
        name="User_Proxy",
        system_message="A human user interacting with the hospital query system.",
        human_input_mode="NEVER",
        code_execution_config=code_execution_config,
    )

    # Create Hospital Query Assistant
    hospital_assistant = autogen.AssistantAgent(
        name="Hospital_Assistant",
        system_message="You are a hospital assistant AI. You help users with their queries about hospital services, appointments, and general medical information. Provide concise and helpful responses. Do not provide any personal medical advice or diagnoses.",
        llm_config=llm_config,
    )

    logging.info("AutoGen agents created successfully with the provided API key.")

# Define LLAMA_GUARD_PROMPT
LLAMA_GUARD_PROMPT = """
You are an AI content moderation system. Your task is to analyze the given text and determine if it contains any inappropriate content, such as profanity, hate speech, or sensitive medical information. Respond with either "SAFE" or "UNSAFE", followed by a brief explanation.

Text to moderate: {text}

Response:
"""

def sanitize_input(text: str) -> str:
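    """Strip HTML-style tags and unusual characters from user input.

    Example: sanitize_input("<p>Visit A&E today!</p>") -> "Visit AE today!"
    """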
    # Remove any potential HTML or script tags
    text = re.sub(r'<[^>]*?>', '', text)
    # Remove any non-alphanumeric characters except common punctuation
    text = re.sub(r'[^a-zA-Z0-9\s.,!?-]', '', text)
    return text.strip()

def moderate_content(text: str) -> Dict[str, Any]:
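    """Run the LLAMA_GUARD_PROMPT against the text and return a verdict dict.

    Returns {"is_safe": bool, "explanation": str, "tokens_used": int}; any API
    failure is treated as unsafe.
    """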
    try:
        response = groq_client.chat.completions.create(
            model="mixtral-8x7b-32768",  # Using Mixtral as Llama Guard is not available on Groq
            messages=[
                {"role": "system", "content": "You are a content moderation system."},
                {"role": "user", "content": LLAMA_GUARD_PROMPT.format(text=text)},
            ],
            max_tokens=100,
        )
        moderation_result = response.choices[0].message.content.strip()
        # The prompt asks the model to begin with "SAFE" or "UNSAFE"; strip any
        # leading quotes before the prefix check ("unsafe" never matches "safe").
        is_safe = moderation_result.lower().lstrip('"\' ').startswith("safe")
        logging.info(f"Moderation result: {moderation_result}")
        return {
            "is_safe": is_safe, 
            "explanation": moderation_result,
            "tokens_used": response.usage.total_tokens
        }
    except Exception as e:
        logging.error(f"Error in content moderation: {str(e)}")
        return {"is_safe": False, "explanation": "Error in content moderation", "tokens_used": 0}

def handle_hospital_query(user_input: str) -> Dict[str, Any]:
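    """Sanitize, moderate, and answer a user query.

    Returns a dict with "response" and "tokens_used" keys; the token count
    covers the moderation call plus the follow-up Groq call used to estimate
    usage for the assistant reply.
    """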
    global groq_client, GROQ_API_KEY, user_proxy, hospital_assistant
    if not groq_client or not GROQ_API_KEY or not user_proxy or not hospital_assistant:
        return {
            "response": "Please enter your GROQ API key in the input field above.",
            "tokens_used": 0
        }
    
    try:
        sanitized_input = sanitize_input(user_input)
        
        moderation_result = moderate_content(sanitized_input)
        if not moderation_result["is_safe"]:
            return {
                "response": f"I apologize, but I cannot process this query. {moderation_result['explanation']}",
                "tokens_used": moderation_result["tokens_used"]
            }

        # Ask the assistant agent for a reply. Depending on the AutoGen version,
        # generate_reply may return a plain string, a message dict, or None.
        response = hospital_assistant.generate_reply(
            messages=[{"role": "user", "content": sanitized_input}],
            sender=user_proxy,
        )
        if isinstance(response, dict):
            response = response.get("content", "")
        elif response is None:
            response = ""

        # AutoGen does not surface token usage for the reply above, so the query is
        # re-sent directly to Groq purely to estimate usage. This is approximate and
        # costs an extra API call.
        assistant_response = groq_client.chat.completions.create(
            model="mixtral-8x7b-32768",
            messages=[{"role": "user", "content": sanitized_input}],
            max_tokens=500,
        )
        
        return {
            "response": response,
            "tokens_used": moderation_result["tokens_used"] + assistant_response.usage.total_tokens
        }
    except Exception as e:
        logging.error(f"Error in handling hospital query: {str(e)}")
        return {
            "response": f"I apologize, but I encountered an error while processing your query: {str(e)}. Please try again later.",
            "tokens_used": 0
        }

# Main application loop (if running directly)
if __name__ == "__main__":
    print("Welcome to the Hospital Query System. Type 'exit' to quit.")
    api_key = input("Please enter your GROQ API key: ").strip()
    set_groq_api_key(api_key)
    total_tokens = 0
    while True:
        try:
            user_input = input("User: ")
            if user_input.strip().lower() == "exit":
                break
            result = handle_hospital_query(user_input)
            print(f"Assistant: {result['response']}")
            print(f"Tokens used: {result['tokens_used']}")
            total_tokens += result['tokens_used']
            print(f"Total tokens used: {total_tokens}")
        except KeyboardInterrupt:
            print("\nExiting the application...")
            break
        except Exception as e:
            logging.error(f"Unexpected error: {str(e)}")
            print("An unexpected error occurred. Please try again.")