# Source: Hugging Face Space by ChenyuRabbitLove — "Update app.py" (commit 9eefa43, 3.83 kB).
# (The lines above were web-page chrome from the hf.co file viewer, not part of the program.)
import json
import os

import gradio as gr
from huggingface_hub import InferenceClient
from openai import OpenAI
from tenacity import retry, stop_after_attempt, wait_random_exponential
# API key is injected via the environment (a Space secret on Hugging Face).
OPENAI_KEY = os.getenv("OPENAI_KEY")
# Module-level OpenAI client shared by chat_completion_request below.
client = OpenAI(api_key=OPENAI_KEY)
def get_current_weather(location, unit="celsius"):
    """Return mock weather for *location* as a JSON string.

    Only Taipei has a canned reading (10 degrees); any other city reports
    an unknown temperature, and that payload omits the unit key entirely —
    matching the original response shape.
    """
    if "taipei" in location.lower():
        payload = {"location": "Taipei", "temperature": "10", "unit": unit}
    else:
        payload = {"location": location, "temperature": "unknown"}
    return json.dumps(payload)
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, tools=None, tool_choice=None):
    """Call the OpenAI chat-completions API with retry/backoff.

    Args:
        messages: Conversation history in OpenAI message format.
        tools: Optional list of tool (function) schemas to expose to the model.
        tool_choice: Optional tool-choice directive, e.g. 'auto'.

    Returns:
        The ChatCompletion response object.

    Raises:
        Exception: any API error is logged and re-raised so the @retry
        decorator can retry; after the final attempt it propagates to the
        caller.
    """
    print(f'query message {messages}')
    try:
        response = client.chat.completions.create(
            model='gpt-4o',
            messages=messages,
            tools=tools,
            tool_choice=tool_choice,
        )
    except Exception as e:
        # The original returned the exception object here, which (a) meant no
        # exception ever propagated, so tenacity never retried, and (b) handed
        # callers an Exception that later failed with AttributeError on
        # `.choices`. Log and re-raise instead.
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        raise
    print(response.choices[0].message.content)
    return response
# Function-calling schema advertised to the model; the single entry maps to
# get_current_weather() defined above. The model infers `unit` from the
# user's location, per the parameter description.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the users location.",
                    },
                },
                "required": ["location", "unit"],
            },
        }
    }
]
def respond(
    message,
    history: list[tuple[str, str]],
):
    """Gradio ChatInterface callback: produce the assistant's reply to *message*.

    Sends the user message to the model with the weather tool available. If
    the model requests tool calls, executes them locally, sends the results
    back for a second completion, and returns that answer; otherwise returns
    the model's direct answer.

    Args:
        message: The latest user message text.
        history: Prior (user, assistant) turns supplied by Gradio
            (not forwarded to the model here).

    Returns:
        The assistant's reply as a plain string — the type ChatInterface
        expects. (The original returned the whole `messages` list, which
        contains a non-JSON-serializable ChatCompletionMessage and cannot be
        rendered by the chat UI.)
    """
    messages = [{"role": "system", "content": [{'type': 'text', 'text': 'You are a helpful agent'}]}]
    messages.append({"role": "user", "content": [{'type': 'text', 'text': message}]})
    response = chat_completion_request(messages, tools=tools, tool_choice='auto')
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    if tool_calls:
        available_functions = {
            "get_current_weather": get_current_weather,
        }
        # The assistant message that carries the tool calls must precede the
        # tool-result messages in the follow-up request.
        messages.append(response_message)
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            function_response = function_to_call(
                location=function_args.get("location"),
                unit=function_args.get("unit"),
            )
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )
        # Second round-trip so the model can phrase an answer from the tool output.
        second_response = chat_completion_request(messages)
        reply = second_response.choices[0].message.content
        print(messages)
        return reply
    reply = response_message.content
    print(messages)
    return reply
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Minimal chat UI: Gradio calls respond() for every user turn.
demo = gr.ChatInterface(
    respond,
)
# Standard script entry point — launch the web server only when run directly.
if __name__ == "__main__":
    demo.launch()