vaibhav pingale committed on
Commit
c7f3c81
1 Parent(s): e5e888c

Initial commit: Hospital Assistant

Files changed (5)
  1. .gitignore +4 -0
  2. README.md +84 -14
  3. app.py +66 -0
  4. hospital_query_app.py +162 -0
  5. requirements.txt +0 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
+ .env
+ __pycache__/
+ *.pyc
+ .cache/
README.md CHANGED
@@ -1,14 +1,84 @@
- ---
- title: HospitalAssitant
- emoji: 🐨
- colorFrom: red
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.1.0
- app_file: app.py
- pinned: false
- license: mit
- short_description: Hospital Query Assistant using AutoGen and GroqCloud API
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Hospital Query Assistant
+
+ This project implements a Hospital Query Assistant using AutoGen and the GroqCloud API, with a Gradio-based user interface. The assistant helps users with queries about hospital services, appointments, and general medical information.
+
+ ## Features
+
+ - Natural language processing for hospital-related queries
+ - Content moderation to ensure safe and appropriate responses
+ - Token usage tracking
+ - User-friendly chat interface
+
+ ## Implementation Details
+
+ ### Backend (`hospital_query_app.py`)
+
+ The backend is implemented using AutoGen and the GroqCloud API; a short usage sketch follows the component list below. Key components include:
+
+ 1. **AutoGen Agents**:
+    - User Proxy Agent: Represents the user in the conversation.
+    - Hospital Assistant Agent: Provides responses to user queries.
+
+ 2. **Content Moderation**:
+    - Uses the Mixtral-8x7b-32768 model to check for inappropriate content.
+    - Ensures queries are safe and relevant before processing.
+
+ 3. **Query Handling**:
+    - Sanitizes user input to remove potential security risks.
+    - Processes queries using the Hospital Assistant Agent.
+    - Tracks token usage for each query.
+
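+ For reference, a minimal sketch of driving the backend directly from Python (no Gradio UI), assuming `GROQ_API_KEY` has been placed in the `.env` file described under Setup:
+
+ ```
+ import os
+ from dotenv import load_dotenv
+ from hospital_query_app import set_groq_api_key, handle_hospital_query
+
+ load_dotenv()                                 # reads GROQ_API_KEY from .env
+ set_groq_api_key(os.environ["GROQ_API_KEY"])  # creates the Groq client and AutoGen agents
+
+ result = handle_hospital_query("What are the hospital's visiting hours?")
+ print(result["response"])      # assistant reply, or a refusal from content moderation
+ print(result["tokens_used"])   # tokens consumed by moderation plus the reply
+ ```
+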
+ ### Frontend (`app.py`)
+
+ The frontend is built using Gradio, providing a web-based user interface; a minimal sketch of the event wiring follows the feature list below. Features include:
+
+ 1. **Chat Interface**:
+    - Displays conversation history.
+    - Allows users to input queries easily.
+
+ 2. **Token Usage Display**:
+    - Shows tokens used in the last query.
+    - Displays total tokens used in the session.
+
+ 3. **Clear Functionality**:
+    - Allows users to reset the conversation and token counters.
+
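+ A minimal sketch of the chat and clear event wiring the UI uses (the real handler in `app.py`, shown later in this commit, routes the message through `handle_hospital_query` and tracks token usage; the placeholder below just echoes a canned reply):
+
+ ```
+ import gradio as gr
+
+ def respond(message, history):
+     # app.py replaces this stub with a call to handle_hospital_query
+     history.append((message, "(assistant reply)"))
+     return history
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot(height=500)
+     msg = gr.Textbox(placeholder="Type your message here...", label="User Input")
+     clear = gr.Button("Clear")
+
+     msg.submit(respond, [msg, chatbot], chatbot)  # send the query, update the history
+     msg.submit(lambda: "", None, msg)             # empty the input box
+     clear.click(lambda: None, None, chatbot)      # reset the conversation
+
+ demo.launch()
+ ```
+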
+ ## Setup and Installation
+
+ 1. Clone the repository:
+    ```
+    git clone https://github.com/vpingale077/hospital-query-assistant.git
+    ```
+
+ 2. Install the required dependencies (the core packages are summarized after these steps):
+    ```
+    pip install -r requirements.txt
+    ```
+
+ 3. Set up environment variables:
+    - Create a `.env` file in the project root.
+    - Add your GroqCloud API key:
+      ```
+      GROQ_API_KEY=your_api_key_here
+      ```
+
+ 4. Run the application:
+    ```
+    python app.py
+    ```
+
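+ `requirements.txt` is committed as a small binary blob, so its contents are not shown here; based on the imports in `app.py` and `hospital_query_app.py`, the core dependencies are roughly the following (an inferred list, not a copy of the file):
+
+ ```
+ gradio
+ pyautogen
+ groq
+ python-dotenv
+ ```
+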
+ ## Usage
+
+ 1. Launch the application using the command above.
+ 2. Open the provided URL in your web browser.
+ 3. Type your hospital-related query in the input box and press Enter.
+ 4. View the assistant's response in the chat interface.
+ 5. Check token usage information on the right side of the interface.
+
+ ## Note
+
+ This assistant is designed for general information purposes only and should not be used for personal medical advice or diagnoses.
+
+ ## License
+
+ [MIT License](LICENSE)
app.py ADDED
@@ -0,0 +1,66 @@
+ import gradio as gr
+ import os
+ from dotenv import load_dotenv
+ from hospital_query_app import handle_hospital_query, set_groq_api_key
+ import logging
+
+ # Load environment variables
+ load_dotenv()
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+ # Initialize token counters
+ total_tokens_used = 0
+ tokens_used = 0
+
+ def process_query(message, history, api_key):
+     global total_tokens_used, tokens_used
+
+     if not api_key:
+         return history + [("System", "Please enter your GROQ API key in the input field below.")]
+
+     set_groq_api_key(api_key)
+     result = handle_hospital_query(message)
+     response = result['response']
+     tokens_used = result['tokens_used']
+
+     total_tokens_used += tokens_used
+
+     history.append((message, response))
+     return history
+
+ def update_token_info():
+     return f"Tokens used in last query: {tokens_used}", f"Total tokens used: {total_tokens_used}"
+
+ # Custom CSS for smaller font and larger chatbot
+ custom_css = """
+ .chatbot-container {
+     font-size: 0.5em;
+ }
+ """
+
+ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
+     gr.Markdown("# Hospital Query Assistant")
+     gr.Markdown("Ask questions about hospital services, appointments, and general medical information.")
+
+     with gr.Row():
+         with gr.Column(scale=4):
+             chatbot = gr.Chatbot(height=500, elem_classes="chatbot-container")
+             msg = gr.Textbox(placeholder="Type your message here...", label="User Input")
+             clear = gr.Button("Clear")
+
+         with gr.Column(scale=1):
+             token_last = gr.Markdown("Tokens used in last query: 0")
+             token_total = gr.Markdown("Total tokens used: 0")
+             api_key = gr.Textbox(placeholder="Enter your GROQ API key here", label="GROQ API Key", type="password")
+
+     msg.submit(process_query, [msg, chatbot, api_key], chatbot)
+     msg.submit(update_token_info, None, [token_last, token_total])
+     msg.submit(lambda: "", None, msg)
+     clear.click(lambda: None, None, chatbot)
+     clear.click(lambda: ("Tokens used in last query: 0", "Total tokens used: 0"), None, [token_last, token_total])
+     clear.click(lambda: "", None, msg)
+
+ if __name__ == "__main__":
+     demo.launch()
hospital_query_app.py ADDED
@@ -0,0 +1,162 @@
+ import os
+ import autogen
+ from groq import Groq
+ from dotenv import load_dotenv
+ from typing import Dict, Any
+ import logging
+ import re
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+ # Load environment variables
+ load_dotenv()
+
+ # Initialize GroqCloud client and agents
+ groq_client = None
+ GROQ_API_KEY = None
+ user_proxy = None
+ hospital_assistant = None
+
+ def set_groq_api_key(api_key: str):
+     global groq_client, GROQ_API_KEY, user_proxy, hospital_assistant
+     GROQ_API_KEY = api_key
+     groq_client = Groq(api_key=GROQ_API_KEY)
+     os.environ["OPENAI_API_KEY"] = GROQ_API_KEY  # Set the environment variable for OpenAI compatibility
+
+     # Configuration for AutoGen agents
+     config_list = [
+         {
+             "model": "mixtral-8x7b-32768",
+             "api_key": GROQ_API_KEY,
+         }
+     ]
+
+     llm_config = {
+         "config_list": config_list,
+         "cache_seed": 42,
+         "temperature": 0.5,
+         "base_url": "https://api.groq.com/openai/v1",
+     }
+
+     # Disable Docker for code execution
+     code_execution_config = {"use_docker": False}
+
+     # Create User Proxy Agent
+     user_proxy = autogen.UserProxyAgent(
+         name="User_Proxy",
+         system_message="A human user interacting with the hospital query system.",
+         human_input_mode="NEVER",
+         code_execution_config=code_execution_config,
+     )
+
+     # Create Hospital Query Assistant
+     hospital_assistant = autogen.AssistantAgent(
+         name="Hospital_Assistant",
+         system_message="You are a hospital assistant AI. You help users with their queries about hospital services, appointments, and general medical information. Provide concise and helpful responses. Do not provide any personal medical advice or diagnoses.",
+         llm_config=llm_config,
+     )
+
+     logging.info("AutoGen agents created successfully with the provided API key.")
+
+ # Define LLAMA_GUARD_PROMPT
+ LLAMA_GUARD_PROMPT = """
+ You are an AI content moderation system. Your task is to analyze the given text and determine if it contains any inappropriate content, such as profanity, hate speech, or sensitive medical information. Respond with either "SAFE" or "UNSAFE", followed by a brief explanation.
+
+ Text to moderate: {text}
+
+ Response:
+ """
+
+ def sanitize_input(text: str) -> str:
+     # Remove any potential HTML or script tags
+     text = re.sub(r'<[^>]*?>', '', text)
+     # Remove any non-alphanumeric characters except common punctuation
+     text = re.sub(r'[^a-zA-Z0-9\s.,!?-]', '', text)
+     return text.strip()
+
+ def moderate_content(text: str) -> Dict[str, Any]:
+     try:
+         response = groq_client.chat.completions.create(
+             model="mixtral-8x7b-32768",  # Using Mixtral as Llama Guard is not available on Groq
+             messages=[
+                 {"role": "system", "content": "You are a content moderation system."},
+                 {"role": "user", "content": LLAMA_GUARD_PROMPT.format(text=text)},
+             ],
+             max_tokens=100,
+         )
+         moderation_result = response.choices[0].message.content.strip()
+         is_safe = moderation_result.lower().startswith("safe")
+         logging.info(f"Moderation result: {moderation_result}")
+         return {
+             "is_safe": is_safe,
+             "explanation": moderation_result,
+             "tokens_used": response.usage.total_tokens
+         }
+     except Exception as e:
+         logging.error(f"Error in content moderation: {str(e)}")
+         return {"is_safe": False, "explanation": "Error in content moderation", "tokens_used": 0}
+
+ def handle_hospital_query(user_input: str) -> Dict[str, Any]:
+     global groq_client, GROQ_API_KEY, user_proxy, hospital_assistant
+     if not groq_client or not GROQ_API_KEY or not user_proxy or not hospital_assistant:
+         return {
+             "response": "Please enter your GROQ API key in the input field above.",
+             "tokens_used": 0
+         }
+
+     try:
+         sanitized_input = sanitize_input(user_input)
+
+         moderation_result = moderate_content(sanitized_input)
+         if not moderation_result["is_safe"]:
+             return {
+                 "response": f"I apologize, but I cannot process this query. {moderation_result['explanation']}",
+                 "tokens_used": moderation_result["tokens_used"]
+             }
+
+         response = hospital_assistant.generate_reply(
+             messages=[{"role": "user", "content": sanitized_input}],
+             sender=user_proxy,
+         )
+
+         # Assuming the response is a string, we need to get token usage from the Groq client
+         assistant_response = groq_client.chat.completions.create(
+             model="mixtral-8x7b-32768",
+             messages=[{"role": "user", "content": sanitized_input}],
+             max_tokens=500,
+         )
+
+         return {
+             "response": response,
+             "tokens_used": moderation_result["tokens_used"] + assistant_response.usage.total_tokens
+         }
+     except Exception as e:
+         logging.error(f"Error in handling hospital query: {str(e)}")
+         return {
+             "response": f"I apologize, but I encountered an error while processing your query: {str(e)}. Please try again later.",
+             "tokens_used": 0
+         }
+
+ # Main application loop (if running directly)
+ if __name__ == "__main__":
+     print("Welcome to the Hospital Query System. Type 'exit' to quit.")
+     api_key = input("Please enter your GROQ API key: ")
+     set_groq_api_key(api_key)
+     total_tokens = 0
+     while True:
+         try:
+             user_input = input("User: ")
+             if user_input.lower() == "exit":
+                 break
+             result = handle_hospital_query(user_input)
+             print(f"Assistant: {result['response']}")
+             print(f"Tokens used: {result['tokens_used']}")
+             total_tokens += result['tokens_used']
+             print(f"Total tokens used: {total_tokens}")
+         except KeyboardInterrupt:
+             print("\nExiting the application...")
+             break
+         except Exception as e:
+             logging.error(f"Unexpected error: {str(e)}")
+             print("An unexpected error occurred. Please try again.")
requirements.txt ADDED
Binary file (130 Bytes).