# chatdemo/app.py — LangChain + Gradio conversational-agent demo
# (origin: Hugging Face Space "chatdemo" by sonald, commit 731907d, 3.71 kB;
#  the web-scrape header lines were converted to this comment so the file parses)
import langchain as lc
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage
from langchain import PromptTemplate, LLMChain, HuggingFaceHub, OpenAI, FewShotPromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector
from langchain.chains import ConversationChain, MapReduceChain
from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory
from langchain.callbacks import get_openai_callback
import os
from langchain.chains import LLMMathChain, SQLDatabaseChain
from langchain.agents import Tool, load_tools, initialize_agent, AgentType
def conversation_agent():
    """Build a conversational ReAct agent with math/terminal/LLM tools and
    run a few sample queries against it, printing each response.

    Reads the API key from the OPENAI_API_KEY environment variable
    (raises KeyError if unset). Side effects: calls the OpenAI API and may
    execute shell commands via the 'terminal' tool; prints to stdout.
    """
    model = OpenAI(openai_api_key=os.environ['OPENAI_API_KEY'])
    # Built-in tools: a calculator chain and (dangerously permissive) shell access.
    tools = load_tools(['llm-math', 'terminal'], llm=model)
    # Fallback "Search" tool: a plain LLM chain that just forwards the question.
    prompt = PromptTemplate(template="{question}", input_variables=['question'])
    llm_chain = LLMChain(llm=model, prompt=prompt)
    tools.append(Tool(name="Search", func=llm_chain.run, description="general QA"))
    memory = ConversationBufferMemory(memory_key="chat_history")
    # FIX: the original bound this to `conversation_agent`, shadowing the
    # enclosing function — confusing and blocks any recursive/self reference.
    agent = initialize_agent(tools=tools,
                             llm=model,
                             max_iterations=3,
                             verbose=True,
                             agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
                             memory=memory)
    # Same sample questions as before; the repeated call/print pairs are a loop.
    for question in (
        "what is (4.5*2.1)^2.2?",
        "what is the capital of Norway?",
        "what's the most famous landmark of this city",
        "free -h",
    ):
        print(agent(question))
    print("--------")
    # Show the prompt template the agent was assembled with.
    print(agent.agent.llm_chain.prompt.template)
from langchain.agents.react.base import DocstoreExplorer
from langchain.docstore import Wikipedia
import gradio as gr
def demo8():
    """Launch a Gradio chat UI backed by a conversational ReAct agent.

    Builds the same agent as `conversation_agent` (math + terminal + generic
    LLM "Search" tool, buffer memory), then serves a Blocks app where each
    submitted prompt is answered by the agent. Blocks until the Gradio
    server is stopped. Requires OPENAI_API_KEY in the environment.
    """
    model = OpenAI(openai_api_key=os.environ['OPENAI_API_KEY'])
    tools = load_tools(['llm-math', 'terminal'], llm=model)
    prompt = PromptTemplate(template="{question}", input_variables=['question'])
    llm_chain = LLMChain(llm=model, prompt=prompt)
    tools.append(Tool(name="Search", func=llm_chain.run, description="general QA"))
    memory = ConversationBufferMemory(memory_key="chat_history")
    agent = initialize_agent(tools=tools,
                             llm=model,
                             max_iterations=3,
                             verbose=True,
                             agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
                             memory=memory)

    def answer(question, history=None):
        """Answer one question; return (chat pairs, updated flat history).

        `history` is a flat [user, bot, user, bot, ...] list supplied by
        gr.State. BUG FIX: the original used a mutable default (history=[]),
        which is shared across calls — any invocation without an explicit
        history would leak turns between conversations.
        """
        history = [] if history is None else history
        history.append(question)
        resp = agent(question)
        print(f"resp: {resp}")
        history.append(resp['output'])
        # Pair up (user, bot) turns for the Chatbot component.
        dial = [(u, v) for u, v in zip(history[::2], history[1::2])]
        return dial, history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(elem_id="chatbot")
        state = gr.State([])  # flat turn history, threaded through `answer`
        with gr.Row():
            text = gr.Textbox(show_label=False, placeholder="enter your prompt")
        text.submit(answer, inputs=[text, state], outputs=[chatbot, state])
    demo.launch()
# Script entry point: launch the Gradio chat demo when run directly
# (importing this module does not start the server).
if __name__ == "__main__":
    demo8()