File size: 3,238 Bytes
34a9275 62ffe23 34a9275 474cb38 7b67ae6 474cb38 203f861 7b67ae6 0675490 7b67ae6 caa2389 203f861 7b67ae6 203f861 7b67ae6 7cdd9fa 7b67ae6 411b212 7b67ae6 474cb38 9bcf246 474cb38 203f861 f52c513 203f861 f52c513 203f861 f52c513 203f861 474cb38 203f861 474cb38 7b67ae6 203f861 7b67ae6 203f861 7b67ae6 203f861 7b67ae6 203f861 f52c513 203f861 f52c513 474cb38 203f861 474cb38 203f861 474cb38 203f861 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 |
import os
import json
import gradio as gr
from openai import OpenAI
from tenacity import retry, wait_random_exponential, stop_after_attempt
from functions_definition import get_functions, get_openai_function_tools
# API key is read from the environment so it never lives in source control.
OPENAI_KEY = os.getenv("OPENAI_KEY")
# Module-level OpenAI client shared by every completion request below.
client = OpenAI(api_key=OPENAI_KEY)
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, tools=None, tool_choice=None):
    """Call the OpenAI chat-completions endpoint, retrying on failure.

    Args:
        messages: Conversation history in OpenAI chat format.
        tools: Optional list of tool/function schemas to expose to the model.
        tool_choice: Optional tool-selection directive (e.g. "auto").

    Returns:
        The ChatCompletion response object.

    Raises:
        Exception: any error from the API call is re-raised after logging, so
            that tenacity's @retry can actually retry it and callers never
            receive an exception object in place of a response.
    """
    print(f"query message {messages}")
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=tools,
            tool_choice=tool_choice,
        )
        print(response.choices[0].message.content)
        return response
    except Exception as e:
        # BUG FIX: the original did `return e`, which (a) defeated @retry —
        # tenacity only retries on a *raised* exception — and (b) made callers
        # crash later with AttributeError on `response.choices`. Log and re-raise.
        print("Unable to generate ChatCompletion response!")
        print(f"Exception: {e}")
        raise
def respond(
    message,
    history: list[tuple[str, str]],
):
    """Gradio chat handler: answer *message* given *history*, using OpenAI tool calls.

    Args:
        message: The latest user message from the chat box.
        history: Prior (user, assistant) message pairs from the Gradio UI.

    Returns:
        The assistant's final reply text (after resolving any tool calls).
    """
    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": "You are a helpful agent"}],
        }
    ]
    # Replay the UI history into OpenAI chat format; skip empty turns.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": user_turn}]}
            )
        if assistant_turn:
            messages.append(
                {
                    "role": "assistant",
                    "content": [{"type": "text", "text": assistant_turn}],
                }
            )
    messages.append({"role": "user", "content": [{"type": "text", "text": message}]})
    response = chat_completion_request(
        messages, tools=get_openai_function_tools(), tool_choice="auto"
    )
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    if tool_calls:
        available_functions = get_functions()
        # The assistant message carrying the tool_calls must precede the
        # tool-result messages in the transcript.
        messages.append(response_message)
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            # GENERALIZED: forward every model-supplied argument instead of
            # only "type", so tools with other parameters also work.
            function_response = function_to_call(**function_args)
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    # Tool message content must be a string for the API.
                    "content": str(function_response),
                }
            )
        # Second round-trip lets the model turn tool results into prose.
        second_response = chat_completion_request(messages)
        messages.append(
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": second_response.choices[0].message.content}
                ],
            }
        )
        return second_response.choices[0].message.content
    # No tool calls: the first response already contains the answer.
    messages.append(
        {
            "role": "assistant",
            "content": [{"type": "text", "text": response.choices[0].message.content}],
        }
    )
    return response.choices[0].message.content
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(respond, title="Function Calling Demo")
if __name__ == "__main__":
demo.launch()
|