# Gradio chat demo backed by LangChain's ConversationChain.
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
def respond(openai_api_key, message, buffer_memory, chat_history):
    """Send *message* to the LLM and append the exchange to the chat history.

    Args:
        openai_api_key: OpenAI API key entered by the user in the UI.
        message: The user's new chat message.
        buffer_memory: ConversationBufferMemory holding prior turns.
        chat_history: List of [user, bot] message pairs shown in the Chatbot.

    Returns:
        ("", buffer_memory, chat_history) — empty string clears the input
        textbox; memory and history are passed back to their gr.State /
        gr.Chatbot components.
    """
    conversation = ConversationChain(
        # BUG FIX: the API key must be given to ChatOpenAI, not to
        # ConversationChain — the chain's pydantic model rejects unknown
        # fields, so passing it there raises a validation error.
        llm=ChatOpenAI(
            temperature=1.0,
            model='gpt-3.5-turbo',
            openai_api_key=openai_api_key,
        ),
        memory=buffer_memory,
    )
    response = conversation.predict(input=message)
    chat_history.append([message, response])
    return "", buffer_memory, chat_history
# UI: an API-settings panel stacked above the chat panel.
with gr.Blocks() as demo:
    with gr.Group(visible=True) as primary_settings:
        with gr.Row():
            # Masked credential input; forwarded to respond() on each turn.
            openai_key = gr.Textbox(
                label="OpenAI Key",
                type="password",
                placeholder="sk-a83jv6fn3x8ndm78b5W...",
            )
            # NOTE(review): this dropdown is not wired into respond(), which
            # hard-codes its model — presumably intended to be connected later.
            model = gr.Dropdown(
                [
                    "gpt-4",
                    "gpt-4-32k",
                    "gpt-3.5-turbo",
                    "gpt-3.5-turbo-16k",
                    "gpt-3.5-turbo-instruct",
                    "text-davinci-002",
                    "text-davinci-003",
                ],
                label="OpenAI Model",
                value="gpt-3.5-turbo",
                interactive=True,
            )

    with gr.Group() as chat:
        # Per-session LangChain memory, kept server-side via gr.State.
        memory = gr.State(ConversationBufferMemory())
        chatbot = gr.Chatbot(label='Chatbot')
        with gr.Row():
            query = gr.Textbox(
                container=False,
                show_label=False,
                placeholder='Type a message...',
                scale=10,
            )
            submit = gr.Button(
                'Submit',
                variant='primary',
                scale=1,
                min_width=0,
            )

    # Pressing Enter in the textbox and clicking the button run the same
    # handler with the same component wiring.
    handler_inputs = [openai_key, query, memory, chatbot]
    handler_outputs = [query, memory, chatbot]
    query.submit(respond, handler_inputs, handler_outputs)
    submit.click(respond, handler_inputs, handler_outputs)

if __name__ == "__main__":
    demo.launch()