sonald committed on
Commit
fe0c25c
1 Parent(s): 062c67b
Files changed (2)
  1. app.py +92 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,92 @@
+ import os
+
+ import gradio as gr
+
+ from langchain import PromptTemplate, LLMChain, OpenAI
+ from langchain.agents import Tool, load_tools, initialize_agent, AgentType
+ from langchain.memory import ConversationBufferMemory
+
+ def conversation_agent():
+     model = OpenAI(openai_api_key=os.environ['OPENAI_API_KEY'])
+     tools = load_tools(['llm-math', 'terminal'], llm=model)
+
+     # Wrap a plain LLM chain as a tool so the agent can fall back to general QA
+     prompt = PromptTemplate(template="{question}", input_variables=['question'])
+     llm_chain = LLMChain(llm=model, prompt=prompt)
+     llm_tool = Tool(name="Search", func=llm_chain.run, description="general QA")
+     tools.append(llm_tool)
+
+     # The conversational ReAct agent expects a memory keyed on "chat_history"
+     memory = ConversationBufferMemory(memory_key="chat_history")
+     agent = initialize_agent(tools=tools,
+                              llm=model,
+                              max_iterations=3,
+                              verbose=True,
+                              agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
+                              memory=memory)
+     resp = agent("what is (4.5*2.1)^2.2?")
+     print(resp)
+
+     # resp = agent("if Mary has four apples and Giorgio brings two and a half apple "
+     #              "boxes (apple box contains eight apples), how many apples do we "
+     #              "have?")
+     # print(resp)
+
+     resp = agent("what is the capital of Norway?")
+     print(resp)
+
+     resp = agent("what's the most famous landmark of this city?")
+     print(resp)
+
+     resp = agent("free -h")
+     print(resp)
+
+     print("--------")
+     print(agent.agent.llm_chain.prompt.template)
+
+ def demo8():
+     model = OpenAI(openai_api_key=os.environ['OPENAI_API_KEY'])
+     tools = load_tools(['llm-math', 'terminal'], llm=model)
+
+     prompt = PromptTemplate(template="{question}", input_variables=['question'])
+     llm_chain = LLMChain(llm=model, prompt=prompt)
+     llm_tool = Tool(name="Search", func=llm_chain.run, description="general QA")
+     tools.append(llm_tool)
+
+     memory = ConversationBufferMemory(memory_key="chat_history")
+     agent = initialize_agent(tools=tools,
+                              llm=model,
+                              max_iterations=3,
+                              verbose=True,
+                              agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
+                              memory=memory)
+     # resp = agent("what is (4.5*2.1)^2.2?")
+
+     def answer(question, history):
+         # Run the agent and store the turn as a (user, bot) pair, which is the
+         # format gr.Chatbot renders; return the history for both outputs.
+         resp = agent(question)
+         print(f"resp: {resp}")
+         history.append((question, resp["output"]))
+         return history, history
+
+     with gr.Blocks() as demo:
+         chatbot = gr.Chatbot(elem_id="chatbot")
+         state = gr.State([])
+
+         with gr.Row():
+             text = gr.Textbox(show_label=False, placeholder="enter your prompt")
+             text.submit(answer, inputs=[text, state], outputs=[chatbot, state])
+
+     demo.launch()
+
+ if __name__ == "__main__":
+     demo8()
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ openai
+ langchain
+ gradio