import os
import configparser

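# Optionally load API keys from a local secrets.ini and export them to the environment: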
# config = configparser.ConfigParser()
# config.read('./secrets.ini')

# openai_api_key = config['OPENAI']['OPENAI_API_KEY']
# serper_api_key = config['SERPER']['SERPER_API_KEY']
# serp_api_key = config['SERPAPI']['SERPAPI_API_KEY']
# os.environ.update({'OPENAI_API_KEY': openai_api_key})
# os.environ.update({'SERPER_API_KEY': serper_api_key})
# os.environ.update({'SERPAPI_API_KEY': serp_api_key})

from typing import List, Union
import re
import json

import pandas as pd
from langchain import SerpAPIWrapper, LLMChain
from langchain.agents import Tool, AgentType, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.document_loaders import DataFrameLoader, SeleniumURLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.prompts import PromptTemplate, StringPromptTemplate, load_prompt, BaseChatPromptTemplate
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema import AgentAction, AgentFinish, HumanMessage
from langchain.vectorstores import DocArrayInMemorySearch, Chroma

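# Auxiliary chains: the stage analyzer classifies the current conversation stage,
# and the user response chain suggests example replies shown under the chat box.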
stage_analyzer_inception_prompt = load_prompt("./templates/stage_analyzer_inception_prompt_template.json")
llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0.0)
stage_analyzer_chain = LLMChain(
    llm=llm,
    prompt=stage_analyzer_inception_prompt, 
    verbose=False, 
    output_key="stage_number")

user_response_prompt = load_prompt("./templates/user_response_prompt.json")
llm = ChatOpenAI(model='gpt-4', temperature=0.5)
user_response_chain = LLMChain(
    llm=llm,
    prompt=user_response_prompt, 
    verbose=False,
    output_key="user_responses"
)

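# Load the wine catalogue (one JSON record per line); each row becomes a document
# whose page content is the wine name and whose metadata holds the other columns.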
df = pd.read_json('./data/unified_wine_data.json', encoding='utf-8', lines=True)

loader = DataFrameLoader(data_frame=df, page_content_column='name')
docs = loader.load()
embeddings = OpenAIEmbeddings()

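# Metadata schema the self-query retriever uses to translate natural-language
# requests into structured filters over the wine attributes.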
metadata_field_info = [
    AttributeInfo(
        name="body",
        description="1-5 rating for the body of wine",
        type="int",
    ),
    AttributeInfo(
        name="sweetness",
        description="1-5 rating for the sweetness of wine",
        type="int",
    ),
    AttributeInfo(
        name="alcohol",
        description="1-5 rating for the alcohol of wine",
        type="int",
    ),
    AttributeInfo(
        name="price",
        description="The price of the wine",
        type="int",
    ),
    AttributeInfo(
        name="rating", 
        description="1-5 rating for the wine", 
        type="float"
    ),
    AttributeInfo(
        name="wine_type", 
        description="The type of wine. It can be '๋ ˆ๋“œ', '๋กœ์ œ', '์ŠคํŒŒํด๋ง', 'ํ™”์ดํŠธ', '๋””์ €ํŠธ', '์ฃผ์ •๊ฐ•ํ™”'", 
        type="string"
    ),
    AttributeInfo(
        name="country", 
        description="The country of wine. It can be '๊ธฐํƒ€ ์‹ ๋Œ€๋ฅ™', '๊ธฐํƒ€๊ตฌ๋Œ€๋ฅ™', '๋‰ด์งˆ๋žœ๋“œ', '๋…์ผ', '๋ฏธ๊ตญ', '์ŠคํŽ˜์ธ', '์•„๋ฅดํ—จํ‹ฐ๋‚˜', '์ดํƒˆ๋ฆฌ์•„', '์น ๋ ˆ', 'ํฌ๋ฃจํˆฌ์นผ', 'ํ”„๋ž‘์Šค', 'ํ˜ธ์ฃผ'", 
        type="float"
    ),
]

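# Vector store over the wine documents plus a self-query retriever that can filter
# on the metadata fields defined above.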
vectorstore = Chroma.from_documents(docs, embeddings)
document_content_description = "Database of wines in a wine store"
llm = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
    llm, vectorstore, document_content_description, metadata_field_info, verbose=False
)

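# Load a specific wine page with Selenium so the agent can read its contents.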
def search_with_url(query):
    return SeleniumURLLoader(urls=[query]).load()

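# Secondary in-memory index over the same documents, used as a fallback Q&A tool.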
index = VectorstoreIndexCreator(
    vectorstore_cls=DocArrayInMemorySearch
).from_loaders([loader])

search = SerpAPIWrapper()

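# Tools exposed to the agent.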
tools = [
    Tool(
        name="Wine database",
        func=retriever.get_relevant_documents,
        description="""
Database about the wines in wine store. You can get information such as the price of the wine, purchase URL, features, rating information, and more.
You can search wines with the following attributes:
- body: 1-5 rating int for the body of wine. You have to specify greater than or less than. For example, if you want to search for wines with a body rating of less than 3, enter 'body: gt 0 lt 3'
- price: The price range of the wine. Please enter the price range in the form of range. For example, if you want to search for wines that cost less than 20,000 won, enter 'price: gt 0 lt20000'
- rating: 1-5 rating float for the wine. You have to specify greater than or less than. For example, if you want to search for wines with a rating of less than 3, enter 'rating: gt 0 lt 3'
- wine_type: The type of wine. It can be '๋ ˆ๋“œ', '๋กœ์ œ', '์ŠคํŒŒํด๋ง', 'ํ™”์ดํŠธ', '๋””์ €ํŠธ', '์ฃผ์ •๊ฐ•ํ™”'
- name: The name of wine. ์ž…๋ ฅํ•  ๋•Œ๋Š” '์™€์ธ ์ด๋ฆ„์€ "๋น„๋ƒ ์กฐ์ž˜" ์ž…๋‹ˆ๋‹ค' ์ด๋Ÿฐ ์‹์œผ๋กœ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.
"""
    ),
    Tool(
        name="Search specific wine with url",
        func=search_with_url,
        description="Search for a specific wine by URL. The query must be a URL."
    ),
    Tool(
        name="Wine database 2",
        func=index.query,
        description="Database of the wines in the wine store. Use this tool if you have trouble getting information from the Wine database tool above. The query must be a string."
    ),
    Tool(
        name="Search",
        func=search.run,
        description="Useful when you need to look something up with a web search. Search in English only."
    ),
]

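# Prompt template for the "이우선" sales agent. It is filled in each turn with the
# current conversation stage, the running conversation history, and the agent scratchpad.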
template = """
Your role is a chatbot that asks customers questions about wine and makes recommendations.
Never forget your name is "이우선".
Keep your responses short to retain the user's attention.
Only generate one response at a time! When you are done generating, end with '<END_OF_TURN>' to give the user a chance to respond.
Responses should be in Korean.

Complete the objective as best you can. You have access to the following tools:

{tools}

Use the following format:
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
이우선: the final response to the user

You must respond according to the conversation stage within the triple backticks and the conversation history within '======='.

Current conversation stage: 
```{conversation_stage}```

Conversation history: 
=======
{conversation_history}
=======

Last user message: {input}
{agent_scratchpad}
"""

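# Conversation stages that the stage analyzer chain can return; the selected stage's
# description is injected into the agent prompt as {conversation_stage}.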
conversation_stages_dict = {
    "1": "Start: Start the conversation by introducing yourself. Be polite and respectful while maintaining a professional tone.",
    "2": "Analyze: Identify the user's preferences in order to make wine recommendations. Ask questions to understand those preferences, one question at a time. The wine database tool is not available at this stage.",
    "3": "Recommendation: Recommend a suitable wine based on the preferences you identified. Recommendations must be limited to wines in the wine database, and you can use the tools to find them. After making a recommendation, ask whether the user likes the wine you recommended.",
    "4": "After recommendation: If the user likes the wine you recommended, provide a link and an image of the wine. Otherwise, go back to the recommendation stage.",
    "5": "Close: When you're done, say goodbye to the user.",
    "6": "Question and Answering: This is where you answer the user's questions. To answer them, you can use the search tool or the wine database tool.",
    "7": "Not in the given steps: Use this step when none of steps 1 to 6 apply.",
}

# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
    # The template to use
    template: str
    # The list of tools available
    tools: List[Tool]
    
    def format(self, **kwargs) -> str:
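        # Map the numeric stage label from the stage analyzer to its full description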
        stage_number = kwargs.pop("stage_number")
        kwargs["conversation_stage"] = conversation_stages_dict[stage_number]
        # Get the intermediate steps (AgentAction, Observation tuples)
        # Format them in a particular way
        intermediate_steps = kwargs.pop("intermediate_steps")
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\nObservation: {observation}\nThought: "
        # Set the agent_scratchpad variable to that value
        kwargs["agent_scratchpad"] = thoughts
        # Create a tools variable from the list of tools provided
        kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
        # Create a list of tool names for the tools provided
        kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
        return self.template.format(**kwargs)

prompt = CustomPromptTemplate(
    template=template,
    tools=tools,
    input_variables=["input", "intermediate_steps", "conversation_history", "stage_number"]
)

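# Output parser: if the LLM output contains "이우선: ", treat it as the final reply to
# the user (AgentFinish); otherwise extract the Action / Action Input pair for a tool call.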
class CustomOutputParser(AgentOutputParser):
    
    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        # Check if agent should finish
        if "์ด์šฐ์„ : " in llm_output:
            return AgentFinish(
                # Return values is generally always a dictionary with a single `output` key
                # It is not recommended to try anything else at the moment :)
                return_values={"output": llm_output.split("이우선: ")[-1].strip()},
                log=llm_output,
            )
        # Parse out the action and action input
        regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        match = re.search(regex, llm_output, re.DOTALL)
        if not match:
            raise ValueError(f"Could not parse LLM output: `{llm_output}`")
        action = match.group(1).strip()
        action_input = match.group(2)
        # Return the action and action input
        return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)

output_parser = CustomOutputParser()

llm_chain = LLMChain(llm=ChatOpenAI(model='gpt-4', temperature=0.0), prompt=prompt, verbose=False,)

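# Single-action agent built on the custom prompt; generation stops at "\nObservation:"
# so the executor can insert the real tool output before the next step.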
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
    llm_chain=llm_chain, 
    output_parser=output_parser,
    stop=["\nObservation:"], 
    allowed_tools=tool_names
)

agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)

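# Gradio UI: chat window, suggested user questions, and a read-only stage history box.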
import gradio as gr

# user_response, stage_history, conversation_history, pre_conversation_history = "", "", """""", """"""

stage_description = ""
for key, value in conversation_stages_dict.items():
    stage_description += f"{key}.{value}\n"

with gr.Blocks(css='#chatbot .overflow-y-auto{height:750px}') as demo:
 
    with gr.Row():
        gr.HTML("""<div style="text-align: center; max-width: 500px; margin: 0 auto;">
            <div>
                <h1>ChatWine</h1>
            </div>
            <p style="margin-bottom: 10px; font-size: 94%">
                LinkedIn <a href="https://www.linkedin.com/company/audrey-ai/about/">Audrey.ai</a>
            </p>
        </div>""")
 
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label='User input')
    samples = [["이번 주에 친구들과 모임이 있는데, 훌륭한 와인 한 병을 추천해줄래?"], ["입문자에게 좋은 와인을 추천해줄래?"], ["보르도와 부르고뉴 와인의 차이점은 뭐야?"]]
    user_response_examples = gr.Dataset(samples=samples, components=[msg], type="index")
    stage_history = gr.Textbox(value="stage history: ", interactive=False, label='stage history')
    submit_btn = gr.Button("전송")
    clear_btn = gr.ClearButton([msg, chatbot])
    stage_info = gr.Textbox(value=stage_description, interactive=False, label='stage description')

    def load_example(example_id):
        global samples
        return samples[example_id][0]

    def answer(user_response, chat_history, stage_history):
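        # One chat turn: classify the conversation stage, run the agent for that stage,
        # append the reply to the chat history, and refresh the suggested questions.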
        global samples
        chat_history = chat_history or []
        stage_history = stage_history or ""
        pre_conversation_history = ""
        for idx, chat in enumerate(chat_history):
            pre_conversation_history += f"User: {chat[0]} <END_OF_TURN>\n"
            pre_conversation_history += f"์ด์šฐ์„ : {chat[1]} <END_OF_TURN>\n"
        conversation_history = pre_conversation_history + f"User: {user_response} <END_OF_TURN>\n"
        stage_number = stage_analyzer_chain.run({'conversation_history': conversation_history, 'stage_history': stage_history})
        stage_number = stage_number[-1]  # keep only the final digit returned by the stage analyzer
        stage_history += stage_number if stage_history == "stage history: " else ", " + stage_number
        response = agent_executor.run({'input':user_response, 'conversation_history': pre_conversation_history, 'stage_number': stage_number})
        conversation_history += "이우선: " + response + "\n"
        for line in conversation_history.split('\n'):
            print(line)
        response = response.split('<END_OF_TURN>')[0]
        chat_history.append((user_response, response))
        user_response_examples = []
        for user_response_example in user_response_chain.run({'conversation_history': conversation_history}).split('|'):
            user_response_examples.append([user_response_example])
        samples = user_response_examples
 
        return "", chat_history, stage_history, gr.Dataset.update(samples=samples)

    def clear(*args):
        global samples
        samples = [["이번 주에 친구들과 모임이 있는데, 훌륭한 와인 한 병을 추천해줄래?"], ["입문자에게 좋은 와인을 추천해줄래?"], ["보르도와 부르고뉴 와인의 차이점은 뭐야?"]]
        return gr.Dataset.update(samples=samples), "stage history: "

    clear_btn.click(fn=clear, inputs=[user_response_examples, stage_history], outputs=[user_response_examples, stage_history])
    user_response_examples.click(load_example, inputs=[user_response_examples], outputs=[msg])      
    submit_btn.click(answer, [msg, chatbot, stage_history], [msg, chatbot, stage_history, user_response_examples])
    msg.submit(answer, [msg, chatbot, stage_history], [msg, chatbot, stage_history, user_response_examples])
demo.launch()