|
import os |
|
import json |
|
|
|
import gradio as gr |
|
from openai import OpenAI |
|
from tenacity import retry, wait_random_exponential, stop_after_attempt |
|
|
|
from functions_definition import get_functions, get_openai_function_tools |
|
|
|
# API key read from the environment; will be None if OPENAI_KEY is unset,
# in which case the OpenAI client falls back to its own key discovery.
OPENAI_KEY = os.getenv("OPENAI_KEY")

# Module-level OpenAI client shared by every request made in this app.
client = OpenAI(api_key=OPENAI_KEY)
|
|
|
|
|
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, tools=None, tool_choice=None):
    """Call the OpenAI chat-completions endpoint with exponential-backoff retries.

    Args:
        messages: List of chat messages in the OpenAI messages format.
        tools: Optional list of tool definitions to expose to the model.
        tool_choice: Optional tool-choice directive (e.g. "auto").

    Returns:
        The ChatCompletion response object.

    Raises:
        Exception: Whatever the OpenAI client raised, after logging. The
            exception must propagate for tenacity's @retry to retry the call;
            the previous version caught it and returned the exception object,
            which both disabled retries and crashed callers that accessed
            ``.choices`` on the result.
    """
    print(f"query message {messages}")
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=tools,
            tool_choice=tool_choice,
        )
    except Exception as e:
        print("Unable to generate ChatCompletion response!")
        print(f"Exception: {e}")
        raise
    print(response.choices[0].message.content)
    return response
|
|
|
|
|
def _text_content(text):
    """Wrap plain text in the structured content list the chat API expects."""
    return [{"type": "text", "text": text}]


def respond(
    message,
    history: list[tuple[str, str]],
):
    """Gradio chat handler: answer `message` given prior `history`, using tools.

    Builds the OpenAI messages list from the system prompt, the (user,
    assistant) history pairs, and the new user message; asks the model with
    the available function tools; if the model requests tool calls, executes
    them and asks the model again for a final answer.

    Args:
        message: The new user message text.
        history: List of (user_text, assistant_text) pairs from earlier turns;
            either element may be falsy and is then skipped.

    Returns:
        The assistant's reply text for display in the chat UI.
    """
    messages = [{"role": "system", "content": _text_content("You are a helpful agent")}]
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": _text_content(user_text)})
        if assistant_text:
            messages.append({"role": "assistant", "content": _text_content(assistant_text)})

    messages.append({"role": "user", "content": _text_content(message)})

    response = chat_completion_request(
        messages, tools=get_openai_function_tools(), tool_choice="auto"
    )

    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    if tool_calls:
        available_functions = get_functions()
        messages.append(response_message)
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            # Forward every argument the model supplied instead of only the
            # hard-coded "type" key — generalizes to tools with additional
            # parameters while behaving identically for the existing
            # single-argument functions.
            function_response = function_to_call(**function_args)
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )
        # Second round-trip: let the model turn the tool results into prose.
        second_response = chat_completion_request(messages)
        final_text = second_response.choices[0].message.content
        messages.append({"role": "assistant", "content": _text_content(final_text)})
        return final_text

    # No tool calls: the first response already contains the answer.
    final_text = response.choices[0].message.content
    messages.append({"role": "assistant", "content": _text_content(final_text)})
    return final_text
|
|
|
|
|
""" |
|
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface |
|
""" |
|
# Chat UI: each user turn is routed through `respond` above.
demo = gr.ChatInterface(respond, title="Function Calling Demo")
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()
|
|