import os
import json
import gradio as gr
from openai import OpenAI
from huggingface_hub import InferenceClient
from tenacity import retry, wait_random_exponential, stop_after_attempt
# Read the API key from the environment so the secret never lives in source,
# and build one shared client for the whole app.
OPENAI_KEY = os.environ.get("OPENAI_KEY")
client = OpenAI(api_key=OPENAI_KEY)
def get_current_weather(location, unit="celsius"):
    """Return a mock current-weather report as a JSON string.

    Args:
        location: Free-form city name; matched case-insensitively.
        unit: Temperature unit ("celsius" or "fahrenheit"). ``None``
            falls back to the default — callers pass
            ``function_args.get("unit")``, which yields ``None`` when
            the model omits the argument.

    Returns:
        A JSON string with ``location`` and ``temperature`` keys
        (plus ``unit`` for known cities); unknown locations report
        a temperature of "unknown".
    """
    if unit is None:
        # Preserve the documented default instead of serializing null.
        unit = "celsius"
    if "taipei" in location.lower():
        return json.dumps({"location": "Taipei", "temperature": "10", "unit": unit})
    else:
        return json.dumps({"location": location, "temperature": "unknown"})
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, tools=None, tool_choice=None):
    """Call the OpenAI chat-completions endpoint, with up to 3 retries.

    Args:
        messages: OpenAI-format message list.
        tools: Optional tool/function schemas to advertise to the model.
        tool_choice: Optional tool-choice policy (e.g. 'auto').

    Returns:
        The ChatCompletion response object.

    Raises:
        Exception: whatever the API client raised, after logging and
            after tenacity's retries are exhausted.
    """
    print(f'query message {messages}')
    try:
        response = client.chat.completions.create(
            model='gpt-4o',
            messages=messages,
            tools=tools,
            tool_choice=tool_choice,
        )
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        # Re-raise instead of returning the exception object: returning it
        # defeats @retry (tenacity only retries on a *raised* exception)
        # and callers would crash on `.choices` anyway.
        raise
    print(response.choices[0].message.content)
    return response
# Tool schema advertised to the model (OpenAI function-calling format).
# The "parameters" member is a JSON Schema the model's generated
# arguments must conform to.
tools = [
    {
        "type": "function",
        "function": {
            # Must match a key of `available_functions` in respond().
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the users location.",
                    },
                },
                # Both arguments are declared required, so the model is asked
                # to always supply a unit rather than rely on the default.
                "required": ["location", "unit"],
            },
        }
    }
]
def respond(
    message,
    history: list[tuple[str, str]],
):
    """Gradio ChatInterface handler: answer `message` given chat `history`.

    Builds an OpenAI-format transcript, lets the model optionally invoke
    the weather tool, and returns the final assistant text.

    Args:
        message: The user's latest utterance.
        history: Prior (user, assistant) turn pairs supplied by Gradio.

    Returns:
        The assistant's reply as a plain string.
    """
    messages = [{"role": "system", "content": [{'type': 'text', 'text': 'You are a helpful agent'}]}]
    # Replay earlier turns; either side of a pair may be empty.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": [{'type': 'text', 'text': val[0]}]})
        if val[1]:
            messages.append({"role": "assistant", "content": [{'type': 'text', 'text': val[1]}]})
    messages.append({"role": "user", "content": [{'type': 'text', 'text': message}]})
    # First round trip: the model may answer directly or request tool calls.
    response = chat_completion_request(messages, tools=tools, tool_choice='auto')
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    if tool_calls:
        # Map tool names from the schema to the local implementations.
        available_functions = {
            "get_current_weather": get_current_weather,
        }
        # The assistant message carrying tool_calls must precede the
        # per-call "tool" result messages in the transcript.
        messages.append(response_message)
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            function_response = function_to_call(
                location=function_args.get("location"),
                unit=function_args.get("unit"),
            )
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )
        # Second round trip: the model reads the tool outputs and answers.
        # (The original also appended the final reply to the local
        # `messages` list, but that list is discarded on return — dead
        # code, removed.)
        second_response = chat_completion_request(messages)
        return second_response.choices[0].message.content
    return response.choices[0].message.content
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
respond,
title='Function Calling Demo'
)
if __name__ == "__main__":
demo.launch() |