Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -39,11 +39,11 @@ stage_analyzer_chain = LLMChain(
|
|
39 |
output_key="stage_number")
|
40 |
|
41 |
user_response_prompt = load_prompt("./templates/user_response_prompt.json")
|
42 |
-
llm = ChatOpenAI(model='gpt- [NOTE(review): removed line truncated in this page capture — the full replacement line appears in the "+" section below as llm = ChatOpenAI(model='gpt-4', temperature=0.5); the original model/params are not recoverable from this capture]
|
43 |
user_response_chain = LLMChain(
|
44 |
llm=llm,
|
45 |
prompt=user_response_prompt,
|
46 |
-
verbose= [NOTE(review): removed line truncated in this page capture — the replacement line in the "+" section below reads verbose=False,; the original value is not recoverable from this capture]
|
47 |
output_key="user_responses"
|
48 |
)
|
49 |
|
@@ -175,8 +175,8 @@ Last user saying: {input}
|
|
175 |
conversation_stages_dict = {
|
176 |
"1": "Start: Start the conversation by introducing yourself. Be polite and respectful while maintaining a professional tone of conversation.",
|
177 |
"2": "Analyze: Identify the user's preferences in order to make wine recommendations. Ask questions to understand the preferences of your users in order to make wine recommendations. Ask only one question at a time. The wine database tool is not available here.",
|
178 |
-
"3": "Recommendation: Recommend the right wine based on the user's preferences identified. Recommendations must be limited to wines in wine database, and you can use tools to do this.",
|
179 |
-
"4": "After recommendation:
|
180 |
"5": "Close: When you're done, say goodbye to the user.",
|
181 |
"6": "Question and Answering: This is where you answer the user's questions. To answer user question, you can use the search tool or the wine database tool.",
|
182 |
"7": "Not in the given steps: This step is for when none of the steps between 1 and 6 apply.",
|
@@ -295,6 +295,8 @@ with gr.Blocks(css='#chatbot .overflow-y-auto{height:750px}') as demo:
|
|
295 |
stage_history += stage_number if stage_history == "stage history: " else ", " + stage_number
|
296 |
response = agent_executor.run({'input':user_response, 'conversation_history': pre_conversation_history, 'stage_number': stage_number})
|
297 |
conversation_history += "이우선: " + response + "\n"
|
|
|
|
|
298 |
response = response.split('<END_OF_TURN>')[0]
|
299 |
chat_history.append((user_response, response))
|
300 |
user_response_examples = []
|
|
|
39 |
output_key="stage_number")
|
40 |
|
41 |
user_response_prompt = load_prompt("./templates/user_response_prompt.json")
|
42 |
+
llm = ChatOpenAI(model='gpt-4', temperature=0.5)
|
43 |
user_response_chain = LLMChain(
|
44 |
llm=llm,
|
45 |
prompt=user_response_prompt,
|
46 |
+
verbose=False,
|
47 |
output_key="user_responses"
|
48 |
)
|
49 |
|
|
|
175 |
conversation_stages_dict = {
|
176 |
"1": "Start: Start the conversation by introducing yourself. Be polite and respectful while maintaining a professional tone of conversation.",
|
177 |
"2": "Analyze: Identify the user's preferences in order to make wine recommendations. Ask questions to understand the preferences of your users in order to make wine recommendations. Ask only one question at a time. The wine database tool is not available here.",
|
178 |
+
"3": "Recommendation: Recommend the right wine based on the user's preferences identified. Recommendations must be limited to wines in wine database, and you can use tools to do this. After making a wine recommendation, it asks if the user likes the wine you recommended.",
|
179 |
+
"4": "After recommendation: If the user likes the wine you recommended, provides a link and image of wine. Otherwise, it takes you back to the recommendation stage.",
|
180 |
"5": "Close: When you're done, say goodbye to the user.",
|
181 |
"6": "Question and Answering: This is where you answer the user's questions. To answer user question, you can use the search tool or the wine database tool.",
|
182 |
"7": "Not in the given steps: This step is for when none of the steps between 1 and 6 apply.",
|
|
|
295 |
stage_history += stage_number if stage_history == "stage history: " else ", " + stage_number
|
296 |
response = agent_executor.run({'input':user_response, 'conversation_history': pre_conversation_history, 'stage_number': stage_number})
|
297 |
conversation_history += "이우선: " + response + "\n"
|
298 |
+
for line in conversation_history.split('\n'):
|
299 |
+
print(line)
|
300 |
response = response.split('<END_OF_TURN>')[0]
|
301 |
chat_history.append((user_response, response))
|
302 |
user_response_examples = []
|