{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "3853d1cd", "metadata": {}, "source": [ "### API 키 불러오기" ] }, { "cell_type": "code", "execution_count": 1, "id": "968fee23", "metadata": {}, "outputs": [], "source": [ "import os\n", "import configparser" ] }, { "cell_type": "code", "execution_count": 2, "id": "a836b0e7", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['./secrets.ini']" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "config = configparser.ConfigParser()\n", "config.read('./secrets.ini')" ] }, { "cell_type": "code", "execution_count": 3, "id": "fef7bec8", "metadata": {}, "outputs": [], "source": [ "openai_api_key = config['OPENAI']['OPENAI_API_KEY']\n", "serper_api_key = config['SERPER']['SERPER_API_KEY']\n", "serp_api_key = config['SERPAPI']['SERPAPI_API_KEY']\n", "os.environ.update({'OPENAI_API_KEY': openai_api_key})\n", "os.environ.update({'SERPER_API_KEY': serper_api_key})\n", "os.environ.update({'SERPAPI_API_KEY': serp_api_key})" ] }, { "cell_type": "code", "execution_count": 4, "id": "c41f820a", "metadata": {}, "outputs": [], "source": [ "from typing import List, Union\n", "import re\n", "import json\n", "\n", "import pandas as pd\n", "from langchain import SerpAPIWrapper, LLMChain\n", "from langchain.agents import Tool, AgentType, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import LLMChain, SimpleSequentialChain\n", "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.document_loaders import DataFrameLoader, SeleniumURLLoader\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.indexes import VectorstoreIndexCreator\n", "from langchain.prompts import PromptTemplate, StringPromptTemplate, load_prompt, BaseChatPromptTemplate\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain.schema import AgentAction, AgentFinish, HumanMessage\n", "from langchain.vectorstores import DocArrayInMemorySearch, Chroma" ] }, { "attachments": {}, "cell_type": "markdown", "id": "e995c9e3", "metadata": {}, "source": [ "### Get Stage Analyzer Prompt" ] }, { "cell_type": "code", "execution_count": 5, "id": "25c82b0d", "metadata": {}, "outputs": [], "source": [ "stage_analyzer_inception_prompt = load_prompt(\"./templates/stage_analyzer_inception_prompt_template.json\")\n", "llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0.0)\n", "stage_analyzer_chain = LLMChain(\n", " llm=llm,\n", " prompt=stage_analyzer_inception_prompt, \n", " verbose=True, \n", " output_key=\"stage_number\")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "63824ec7", "metadata": {}, "source": [ "### Load wine database json" ] }, { "cell_type": "code", "execution_count": 6, "id": "d1228108", "metadata": {}, "outputs": [], "source": [ "df = pd.read_json('./data/unified_wine_data.json', encoding='utf-8', lines=True)" ] }, { "cell_type": "code", "execution_count": 7, "id": "4c2ca36f", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
urlsite_namepricenameen_nameimg_urlbodyaciditytanninsweetnessalcoholwine_typecountrygraperatingpickup_locationvivino_link
0https://www.winenara.com/shop/product/product_...winenara49000모노폴 클라시코Monopole Classicohttps://www.winenara.com/uploads/product/550/1...3-1-1화이트스페인3.8https://www.vivino.com/monopole-la-rioja-blanc...
1https://www.winenara.com/shop/product/product_...winenara32000슐럼베르거 로제 스페셜 브뤼Schlumberger Rose Special Bruthttps://www.winenara.com/uploads/product/550/d...3-1-1스파클링독일3.8https://www.vivino.com/schlumberger-spring-edi...
2https://www.winenara.com/shop/product/product_...winenara50000SET)페데럴리스트 샤르도네 원통 패키지SET)THE FEDERALIST CHARDONNAYhttps://www.winenara.com/uploads/product/550/d...3-1-1화이트미국3.7https://www.vivino.com/federalist-chardonnay-m...
3https://www.winenara.com/shop/product/product_...winenara55000베니카 트레 비니스VENICA TRE VIGNIShttps://www.winenara.com/uploads/product/550/c...4-1-1화이트이탈리아3.9https://www.vivino.com/US-CA/en/venica-venica-...
4https://www.winenara.com/shop/product/product_...winenara24900SET)빌라엠비앙코 + 글라스2개 윈터패키지SET)VILLA M Bianco + GLASS WINTER PACKAGEhttps://www.winenara.com/uploads/product/550/a...-14-1디저트이탈리아3.9https://www.vivino.com/villa-m-bianco/w/1774733
\n", "
" ], "text/plain": [ " url site_name price \\\n", "0 https://www.winenara.com/shop/product/product_... winenara 49000 \n", "1 https://www.winenara.com/shop/product/product_... winenara 32000 \n", "2 https://www.winenara.com/shop/product/product_... winenara 50000 \n", "3 https://www.winenara.com/shop/product/product_... winenara 55000 \n", "4 https://www.winenara.com/shop/product/product_... winenara 24900 \n", "\n", " name en_name \\\n", "0 모노폴 클라시코 Monopole Classico \n", "1 슐럼베르거 로제 스페셜 브뤼 Schlumberger Rose Special Brut \n", "2 SET)페데럴리스트 샤르도네 원통 패키지 SET)THE FEDERALIST CHARDONNAY \n", "3 베니카 트레 비니스 VENICA TRE VIGNIS \n", "4 SET)빌라엠비앙코 + 글라스2개 윈터패키지 SET)VILLA M Bianco + GLASS WINTER PACKAGE \n", "\n", " img_url body acidity tannin \\\n", "0 https://www.winenara.com/uploads/product/550/1... 3 \n", "1 https://www.winenara.com/uploads/product/550/d... 3 \n", "2 https://www.winenara.com/uploads/product/550/d... 3 \n", "3 https://www.winenara.com/uploads/product/550/c... 4 \n", "4 https://www.winenara.com/uploads/product/550/a... -1 \n", "\n", " sweetness alcohol wine_type country grape rating pickup_location \\\n", "0 -1 -1 화이트 스페인 3.8 \n", "1 -1 -1 스파클링 독일 3.8 \n", "2 -1 -1 화이트 미국 3.7 \n", "3 -1 -1 화이트 이탈리아 3.9 \n", "4 4 -1 디저트 이탈리아 3.9 \n", "\n", " vivino_link \n", "0 https://www.vivino.com/monopole-la-rioja-blanc... \n", "1 https://www.vivino.com/schlumberger-spring-edi... \n", "2 https://www.vivino.com/federalist-chardonnay-m... \n", "3 https://www.vivino.com/US-CA/en/venica-venica-... \n", "4 https://www.vivino.com/villa-m-bianco/w/1774733 " ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.head()" ] }, { "attachments": {}, "cell_type": "markdown", "id": "a07fd3fe", "metadata": {}, "source": [ "### Prepare Langchain Tool" ] }, { "attachments": {}, "cell_type": "markdown", "id": "a7167dc9", "metadata": {}, "source": [ "#### Tool1: Wine database 1" ] }, { "cell_type": "code", "execution_count": 8, "id": "187df914", "metadata": {}, "outputs": [], "source": [ "loader =DataFrameLoader(data_frame=df, page_content_column='name')\n", "docs = loader.load()\n", "embeddings = OpenAIEmbeddings()" ] }, { "attachments": {}, "cell_type": "markdown", "id": "4a076fc4", "metadata": {}, "source": [ "아래는 wine database1에 metadata_field Attribute이다. 아래를 기준으로 서치를 진행하게 된다." ] }, { "cell_type": "code", "execution_count": 9, "id": "b18fd84a", "metadata": {}, "outputs": [], "source": [ "metadata_field_info = [\n", " AttributeInfo(\n", " name=\"body\",\n", " description=\"1-5 rating for the body of wine\",\n", " type=\"int\",\n", " ),\n", " AttributeInfo(\n", " name=\"sweetness\",\n", " description=\"1-5 rating for the sweetness of wine\",\n", " type=\"int\",\n", " ),\n", " AttributeInfo(\n", " name=\"alcohol\",\n", " description=\"1-5 rating for the alcohol of wine\",\n", " type=\"int\",\n", " ),\n", " AttributeInfo(\n", " name=\"price\",\n", " description=\"The price of the wine\",\n", " type=\"int\",\n", " ),\n", " AttributeInfo(\n", " name=\"rating\", \n", " description=\"1-5 rating for the wine\", \n", " type=\"float\"\n", " ),\n", " AttributeInfo(\n", " name=\"wine_type\", \n", " description=\"The type of wine. It can be '레드', '로제', '스파클링', '화이트', '디저트', '주정강화'\", \n", " type=\"string\"\n", " ),\n", " AttributeInfo(\n", " name=\"country\", \n", " description=\"The country of wine. 
It can be '기타 신대륙', '기타구대륙', '뉴질랜드', '독일', '미국', '스페인', '아르헨티나', '이탈리아', '칠레', '포루투칼', '프랑스', '호주'\", \n", " type=\"float\"\n", " ),\n", "]" ] }, { "cell_type": "code", "execution_count": 10, "id": "e24b97ab", "metadata": {}, "outputs": [], "source": [ "vectorstore = Chroma.from_documents(docs, embeddings)\n", "document_content_description = \"Database of a wine\"\n", "llm = OpenAI(temperature=0)\n", "retriever = SelfQueryRetriever.from_llm(\n", " llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n", ") # Added missing closing parenthesis\n" ] }, { "cell_type": "code", "execution_count": 11, "id": "9490ccbd", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\chois\\Desktop\\chatwine\\.venv\\lib\\site-packages\\langchain\\chains\\llm.py:275: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n", " warnings.warn(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "query=' ' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='wine_type', value='레드'), Operation(operator=, arguments=[Comparison(comparator=, attribute='body', value=0), Comparison(comparator=, attribute='body', value=3)])]) limit=None\n" ] }, { "data": { "text/plain": [ "[Document(page_content='바 독 피노누아', metadata={'url': 'https://www.winenara.com/shop/product/product_view?product_cd=03P976', 'site_name': 'winenara', 'price': 29000, 'en_name': 'BAR DOG PINOT NOIR', 'img_url': 'https://www.winenara.com/uploads/product/550/1936_detail_084.png', 'body': 2, 'acidity': '', 'tannin': '', 'sweetness': -1, 'alcohol': -1, 'wine_type': '레드', 'country': '미국', 'grape': '', 'rating': 3.6, 'pickup_location': '', 'vivino_link': 'https://www.vivino.com/US-CA/en/bar-dog-pinot-noir/w/7129723'}),\n", " Document(page_content='루이라뚜르 피노누아', metadata={'url': 'https://www.winenara.com/shop/product/product_view?product_cd=03H965', 'site_name': 'winenara', 'price': 52000, 'en_name': 'LOUIS LATOUR PINOT NOIR', 'img_url': 'https://www.winenara.com/uploads/product/550/493_detail_025.png', 'body': 2, 'acidity': '', 'tannin': '', 'sweetness': -1, 'alcohol': -1, 'wine_type': '레드', 'country': '프랑스', 'grape': '', 'rating': 3.6, 'pickup_location': '', 'vivino_link': 'https://www.vivino.com/GB/en/louis-latour-bourgogne-pinot-noir/w/7343'}),\n", " Document(page_content='루이라뚜르 상뜨네', metadata={'url': 'https://www.winenara.com/shop/product/product_view?product_cd=03P299', 'site_name': 'winenara', 'price': 79000, 'en_name': 'LOUIS LATOUR SANTENAY', 'img_url': 'https://www.winenara.com/uploads/product/550/489_detail_096.png', 'body': 2, 'acidity': '', 'tannin': '', 'sweetness': -1, 'alcohol': -1, 'wine_type': '레드', 'country': '프랑스', 'grape': '', 'rating': 3.8, 'pickup_location': '', 'vivino_link': 'https://www.vivino.com/louis-latour-santenay-rouge/w/7369'}),\n", " Document(page_content='비알레또 로소', metadata={'url': 'https://www.winenara.com/shop/product/product_view?product_cd=033704', 'site_name': 'winenara', 'price': 12000, 'en_name': 'VIALETTO ROSSO', 'img_url': 'https://www.winenara.com/uploads/product/550/d1ef6058de3661b565084b815e359852.png', 'body': 2, 'acidity': '', 'tannin': '', 'sweetness': -1, 'alcohol': -1, 'wine_type': '레드', 'country': '이탈리아', 'grape': '', 'rating': 3.1, 'pickup_location': '', 'vivino_link': 'https://www.vivino.com/US-CA/en/vialetto-rosso-dolce/w/2213764'})]" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ 
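"# The SelfQueryRetriever parses gt/lt/eq comparators in the query into metadata filters over the fields defined above.\n",
"# Another example of the same filter syntax (not executed here), e.g. red wines priced under 20,000 won:\n",
"# retriever.get_relevant_documents('{\"wine_type\": \"레드\", \"price\": \"gt 0 lt 20000\"}')\n",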
"retriever.get_relevant_documents('{\"wine_type\":\"레드\", \"body\": \"lt 3 gt 0\"}') # gt means greater than, lt means less than, eq means equal to" ] }, { "attachments": {}, "cell_type": "markdown", "id": "3d325a05", "metadata": {}, "source": [ "#### Tool2: Search specific wine with url" ] }, { "cell_type": "code", "execution_count": 12, "id": "314fe0d8", "metadata": {}, "outputs": [], "source": [ "def search_with_url(query):\n", " return SeleniumURLLoader(urls=[query]).load()" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c5c2ef95", "metadata": {}, "source": [ "#### Tool3: Wine database 2" ] }, { "cell_type": "code", "execution_count": 13, "id": "7fe29cad", "metadata": {}, "outputs": [], "source": [ "index = VectorstoreIndexCreator(\n", " vectorstore_cls=DocArrayInMemorySearch\n", ").from_loaders([loader])" ] }, { "attachments": {}, "cell_type": "markdown", "id": "2fca766c", "metadata": {}, "source": [ "#### Tool4: Search in Google" ] }, { "cell_type": "code", "execution_count": 14, "id": "f09f92fe", "metadata": {}, "outputs": [], "source": [ "search = SerpAPIWrapper()" ] }, { "cell_type": "code", "execution_count": 15, "id": "f296b9a2", "metadata": {}, "outputs": [], "source": [ "tools = [\n", " Tool(\n", " name=\"Wine database\",\n", " func=retriever.get_relevant_documents,\n", " description=\"\"\"\n", "Database about the wines in wine store. You can get information such as the price of the wine, purchase URL, features, rating information, and more.\n", "You can search wines with the following attributes:\n", "- body: 1-5 rating int for the body of wine. You have to specify greater than or less than. For example, if you want to search for wines with a body rating of less than 3, enter 'body: gt 0 lt 3'\n", "- price: The price range of the wine. Please enter the price range in the form of range. For example, if you want to search for wines that cost less than 20,000 won, enter 'price: gt 0 lt20000'\n", "- rating: 1-5 rating float for the wine. You have to specify greater than or less than. For example, if you want to search for wines with a rating of less than 3, enter 'rating: gt 0 lt 3'\n", "- wine_type: The type of wine. It can be '레드', '로제', '스파클링', '화이트', '디저트', '주정강화'\n", "- name: The name of wine. 입력할 때는 '와인 이름은 \"비냐 조잘\" 입니다' 이런 식으로 입력해주세요.\n", "\"\"\"\n", " ),\n", " Tool(\n", " name = \"Search specific wine with url\",\n", " func=search_with_url,\n", " description=\"Search specific wine with url. Query must be url\"\n", " ),\n", " Tool(\n", " name = \"Wine database 2\",\n", " func=index.query,\n", " description=\"Database about the wines in wine store. You can use this tool if you're having trouble getting information from the wine database tool above. Query must be in String\"\n", " ),\n", " Tool(\n", " name = \"Search\",\n", " func=search.run,\n", " description=\"Useful for when you need to ask with search. Search in English only.\"\n", " ),\n", "]" ] }, { "cell_type": "code", "execution_count": 16, "id": "1092264d", "metadata": {}, "outputs": [], "source": [ "template = \"\"\"\n", "Your role is a chatbot that asks customers questions about wine and makes recommendations.\n", "Never forget your name is \"이우선\".\n", "Keep your responses in short length to retain the user's attention. \n", "Only generate one response at a time! When you are done generating, end with '' to give the user a chance to respond.\n", "Responses should be in Korean.\n", "\n", "Complete the objective as best you can. 
You have access to the following tools:\n", "\n", "{tools}\n", "\n", "Use the following format:\n", "Thought: you should always think about what to do\n", "Action: the action to take, should be one of [{tool_names}]\n", "Action Input: the input to the action\n", "Observation: the result of the action\n", "... (this Thought/Action/Action Input/Observation can repeat N times)\n", "Thought: I now know the final answer\n", "이우선: the final response to the user\n", "\n", "You must respond according to the conversation stage within the triple backticks and conversation history within in '======'.\n", "\n", "Current conversation stage: \n", "```{conversation_stage}```\n", "\n", "Conversation history: \n", "=======\n", "{conversation_history}\n", "=======\n", "\n", "Last user saying: {input}\n", "{agent_scratchpad}\n", "\"\"\"\n", "\n", "conversation_stages_dict = {\n", " \"1\": \"Start: Start the conversation by introducing yourself. Be polite and respectful while maintaining a professional tone of conversation.\",\n", " \"2\": \"Analyze: Identify the user's preferences in order to make wine recommendations. Ask questions to understand the preferences of your users in order to make wine recommendations. Ask only one question at a time. The wine database tool is not available here.\",\n", " \"3\": \"Recommendation: Recommend the right wine based on the user's preferences identified. Recommendations must be limited to wines in wine database, and you can use tools to do this.\",\n", " \"4\": \"After recommendation: After making a wine recommendation, it asks if the user likes the wine you recommended, and if they do, it provides a link to it. Otherwise, it takes you back to the recommendation stage.\",\n", " \"5\": \"Close: When you're done, say goodbye to the user.\",\n", " \"6\": \"Question and Answering: This is where you answer the user's questions. 
To answer user question, you can use the search tool or the wine database tool.\",\n", " \"7\": \"Not in the given steps: This step is for when none of the steps between 1 and 6 apply.\",\n", "}\n", "\n", "# Set up a prompt template\n", "class CustomPromptTemplate(StringPromptTemplate):\n", " # The template to use\n", " template: str\n", " # The list of tools available\n", " tools: List[Tool]\n", " \n", " def format(self, **kwargs) -> str:\n", " stage_number = kwargs.pop(\"stage_number\")\n", " kwargs[\"conversation_stage\"] = conversation_stages_dict[stage_number]\n", " # Get the intermediate steps (AgentAction, Observation tuples)\n", " # Format them in a particular way\n", " intermediate_steps = kwargs.pop(\"intermediate_steps\")\n", " thoughts = \"\"\n", " for action, observation in intermediate_steps:\n", " thoughts += action.log\n", " thoughts += f\"\\nObservation: {observation}\\nThought: \"\n", " # Set the agent_scratchpad variable to that value\n", " kwargs[\"agent_scratchpad\"] = thoughts\n", " # Create a tools variable from the list of tools provided\n", " kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in self.tools])\n", " # Create a list of tool names for the tools provided\n", " kwargs[\"tool_names\"] = \", \".join([tool.name for tool in self.tools])\n", " return self.template.format(**kwargs)\n", "\n", "prompt = CustomPromptTemplate(\n", " template=template,\n", " tools=tools,\n", " input_variables=[\"input\", \"intermediate_steps\", \"conversation_history\", \"stage_number\"]\n", ")" ] }, { "cell_type": "code", "execution_count": 17, "id": "4850edcb", "metadata": {}, "outputs": [], "source": [ "class CustomOutputParser(AgentOutputParser):\n", " \n", " def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n", " # Check if agent should finish\n", " if \"이우선: \" in llm_output:\n", " return AgentFinish(\n", " # Return values is generally always a dictionary with a single `output` key\n", " # It is not recommended to try anything else at the moment :)\n", " return_values={\"output\": llm_output.split(\"이우선: \")[-1].strip()},\n", " log=llm_output,\n", " )\n", " # Parse out the action and action input\n", " regex = r\"Action\\s*\\d*\\s*:(.*?)\\nAction\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)\"\n", " match = re.search(regex, llm_output, re.DOTALL)\n", " if not match:\n", " raise ValueError(f\"Could not parse LLM output: `{llm_output}`\")\n", " action = match.group(1).strip()\n", " action_input = match.group(2)\n", " # Return the action and action input\n", " return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)\n", "\n", "output_parser = CustomOutputParser()" ] }, { "attachments": {}, "cell_type": "markdown", "id": "149599b0", "metadata": {}, "source": [ "### Define Langchain Agent" ] }, { "cell_type": "code", "execution_count": 18, "id": "a267808b", "metadata": {}, "outputs": [], "source": [ "llm_chain = LLMChain(llm=ChatOpenAI(model='gpt-4', temperature=0.0), prompt=prompt, verbose=True,)\n", "\n", "tool_names = [tool.name for tool in tools]\n", "agent = LLMSingleActionAgent(\n", " llm_chain=llm_chain, \n", " output_parser=output_parser,\n", " stop=[\"\\nObservation:\"], \n", " allowed_tools=tool_names\n", ")" ] }, { "cell_type": "code", "execution_count": 19, "id": "9b7c7d94", "metadata": {}, "outputs": [], "source": [ "agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c15b8080", 
"metadata": {}, "source": [ "### Start Conversation" ] }, { "cell_type": "code", "execution_count": 20, "id": "3f5abad9", "metadata": {}, "outputs": [], "source": [ "user_response = \"\"\n", "conversation_history, pre_conversation_history = \"\"\"\"\"\", \"\"\"\"\"\"\n", "stage_history = \"\"" ] }, { "cell_type": "code", "execution_count": 21, "id": "f2bace78", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new chain...\u001b[0m\n", "Prompt after formatting:\n", "\u001b[32;1m\u001b[1;3mYou are an assistant decide which stage of the conversation to move to or which stage to stay at.\n", "Following '===' is the conversation history. \n", "Use conversation history to select the next step the agent should take.\n", "\n", "Below are the stages of the conversation that the agent can take.\n", "1. Start: This is the first step to take when starting a conversation or responding to a user's first response. Start the conversation by introducing yourself. Be polite and respectful while maintaining a professional tone of conversation.\n", "2. Analyze: When a customer wants a recommendation, run this step before recommendation. This is the step where you identify the user's preferences. Ask enough questions to understand your users' preferences.\n", "3. Recommendation: Once you know the preference of user, you can recommend suitable wines accordingly. Recommendations should be limited to wines in your wine database, and you can use tools for this.\n", "4. After recommendation: After making a wine recommendation, it asks if the user likes the wine you recommended, and if they do, it provides a link to it. Otherwise, it takes you back to the recommendation stage.\n", "5. Close: When you're done, say goodbye to the user.\n", "6. Question and Answering: This is where you answer the user's questions.\n", "7. Not in the given steps: This step is for when none of the steps between 1 and 6 apply.\n", "\n", "Only answer with a number between 1 through 7 with a best guess of what stage should the conversation continue with. \n", "The answer needs to be one number only, no words.\n", "Once again, we need to understand the user's preferences (STEP 2) before we can make a recommendation (STEP 3), and we need to understand the user's preferences (STEP 2) about 2 times.\n", "Do not answer anything else nor add anything to you answer.\n", "\n", "Below is four examples of how to do this task.\n", "Example1:\n", "conversation history:\n", " User: 안녕하세요. \n", "stage history: \n", "Answer: 1\n", "\n", "Example2:\n", "conversation history:\n", "User: 안녕하세요. \n", "이우선: 무엇을 도와드릴까요? \n", "User: 와인 추천해주세요. \n", "stage history: 1\n", "Answer: 2\n", "\n", "Example3:\n", "conversation history:\n", "User: 안녕하세요. \n", "이우선: 무엇을 도와드릴까요? \n", "User: 와인의 포도는 어떤 종류가 있나요?. \n", "stage history: 1\n", "Answer: 6\n", "\n", "Example4:\n", "conversation history:\n", "User: 안녕하세요. \n", "이우선: 무엇을 도와드릴까요? \n", "User: 와인 추천해주세요. \n", "이우선: 어떤 행사나 기념일을 위해 와인을 찾으시는지 알려주실 수 있으신가요? \n", "User: 이번주에 결혼기념일이 있어서요. \n", "이우선: 그렇군요. 가격대는 어느정도로 생각하고 계신가요? \n", "User: 20만원 정도요 \n", "이우선: 그렇군요. 달달한 와인을 선호하시나요? 아니면 약간 신 와인을 선호하시나요? \n", "User: 달달한 와인이요 \n", "stage history: 1 2 2 2\n", "Thought: There are three '2's in the stage history. 
So the next stage should be 3.\n", "Answer: 3\n", "\n", "Now determine what should be the next immediate conversation stage for the agent in the conversation by selecting one from the following options:\n", "Use the conversation history between first and second '======' and stage history to accomplish the task above.\n", "If conversation history is empty, output 1.\n", "\n", "conversation history:\n", "======\n", "\n", "======\n", "\n", "stage history: \n", "\n", "Answer:\u001b[0m\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1m> Finished chain.\u001b[0m\n", "stage_number: 1\n", "\n", "\n", "\u001b[1m> Entering new chain...\u001b[0m\n", "\n", "\n", "\u001b[1m> Entering new chain...\u001b[0m\n", "Prompt after formatting:\n", "\u001b[32;1m\u001b[1;3m\n", "Your role is a chatbot that asks customers questions about wine and makes recommendations.\n", "Never forget your name is \"이우선\".\n", "Keep your responses in short length to retain the user's attention. \n", "Only generate one response at a time! When you are done generating, end with '' to give the user a chance to respond.\n", "Responses should be in Korean.\n", "\n", "Complete the objective as best you can. You have access to the following tools:\n", "\n", "Wine database: \n", "Database about the wines in wine store. You can get information such as the price of the wine, purchase URL, features, rating information, and more.\n", "You can search wines with the following attributes:\n", "- body: 1-5 rating int for the body of wine. You have to specify greater than or less than. For example, if you want to search for wines with a body rating of less than 3, enter 'body: gt 0 lt 3'\n", "- price: The price range of the wine. Please enter the price range in the form of range. For example, if you want to search for wines that cost less than 20,000 won, enter 'price: gt 0 lt20000'\n", "- rating: 1-5 rating float for the wine. You have to specify greater than or less than. For example, if you want to search for wines with a rating of less than 3, enter 'rating: gt 0 lt 3'\n", "- wine_type: The type of wine. It can be '레드', '로제', '스파클링', '화이트', '디저트', '주정강화'\n", "- name: The name of wine. 입력할 때는 '와인 이름은 \"비냐 조잘\" 입니다' 이런 식으로 입력해주세요.\n", "\n", "Search specific wine with url: Search specific wine with url. Query must be url\n", "Wine database 2: Database about the wines in wine store. You can use this tool if you're having trouble getting information from the wine database tool above. Query must be in String\n", "Search: Useful for when you need to ask with search. Search in English only.\n", "\n", "Use the following format:\n", "Thought: you should always think about what to do\n", "Action: the action to take, should be one of [Wine database, Search specific wine with url, Wine database 2, Search]\n", "Action Input: the input to the action\n", "Observation: the result of the action\n", "... (this Thought/Action/Action Input/Observation can repeat N times)\n", "Thought: I now know the final answer\n", "이우선: the final response to the user\n", "\n", "You must respond according to the conversation stage within the triple backticks and conversation history within in '======'.\n", "\n", "Current conversation stage: \n", "```Start: Start the conversation by introducing yourself. 
Be polite and respectful while maintaining a professional tone of conversation.```\n", "\n", "Conversation history: \n", "=======\n", "\n", "=======\n", "\n", "Last user saying: \n", "\n", "\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n", "\u001b[32;1m\u001b[1;3m이우선: 안녕하세요! 저는 이우선이라고 합니다. 와인에 대해 궁금한 점이 있으시면 도와드리겠습니다. 어떤 와인을 찾고 계신가요? \u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] } ], "source": [ "stage_number = stage_analyzer_chain.run({'conversation_history': conversation_history, 'stage_history': stage_history})\n", "print(f'stage_number: {stage_number}')\n", "stage_history += stage_number if stage_history == \"\" else \", \" + stage_number\n", "response = agent_executor.run({'input':user_response, 'conversation_history': pre_conversation_history, 'stage_number': stage_number})\n", "conversation_history += \"이우선: \" + response + \"\\n\"" ] }, { "cell_type": "code", "execution_count": 22, "id": "e129d20a", "metadata": {}, "outputs": [], "source": [ "user_response = input(\"User: \")\n", "pre_conversation_history = conversation_history\n", "conversation_history += f\"User: {user_response} \\n\"" ] }, { "cell_type": "code", "execution_count": 23, "id": "794ecbb5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "이우선: 안녕하세요! 저는 이우선이라고 합니다. 와인에 대해 궁금한 점이 있으시면 도와드리겠습니다. 어떤 와인을 찾고 계신가요? \n", "User: \n", "\n" ] } ], "source": [ "for i in conversation_history.split('\\n'):\n", " print(i)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "2a37f5f6", "metadata": {}, "source": [ "### Gradio\n", "\n", "간단하게 웹 구성을 테스트하는 gradio이다. 개선해야할 점이 많지만 맛보기로 올려보았다." ] }, { "cell_type": "code", "execution_count": 24, "id": "341e8a25", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\chois\\Desktop\\chatwine\\.venv\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] } ], "source": [ "import gradio as gr" ] }, { "cell_type": "code", "execution_count": 25, "id": "b1be47e1", "metadata": {}, "outputs": [], "source": [ "user_response, stage_history, conversation_history, pre_conversation_history = \"\", \"\", \"\"\"\"\"\", \"\"\"\"\"\"\n", " \n", "with gr.Blocks(css='#chatbot .overflow-y-auto{height:750px}') as demo:\n", " user_response = \"\"\n", " conversation_history, pre_conversation_history = \"\"\"\"\"\", \"\"\"\"\"\"\n", " stage_history = \"\"\n", " \n", " with gr.Row():\n", " gr.HTML(\"\"\"
\n", "
\n", "

ChatWine

\n", "
\n", "

\n", " LinkedIn Audrey.ai\n", "

\n", "
\"\"\")\n", " \n", " chatbot = gr.Chatbot()\n", " msg = gr.Textbox()\n", " submit_btn = gr.Button(\"전송\")\n", " clear_btn = gr.ClearButton([msg, chatbot])\n", "\n", " def answer(user_response, chat_history):\n", " global conversation_history, pre_conversation_history, stage_history, answer_token, count\n", " answer_token, count = '', False\n", " pre_conversation_history = conversation_history\n", " conversation_history += f\"User: {user_response} \\n\"\n", " stage_number = stage_analyzer_chain.run({'conversation_history': conversation_history, 'stage_history': stage_history})\n", " stage_number = stage_number[-1]\n", " stage_history += stage_number if stage_history == \"\" else \", \" + stage_number\n", " print(stage_history)\n", " response = agent_executor.run({'input':user_response, 'conversation_history': pre_conversation_history, 'stage_number': stage_number})\n", " conversation_history += \"이우선: \" + response + \"\\n\"\n", " response = response.split('')[0]\n", " chat_history.append((user_response, response))\n", " \n", " return \"\", chat_history\n", " \n", " def user(user_message, history):\n", " return gr.update(value=\"\", interactive=False), history + [[user_message, None]]\n", "\n", " def clear(*args):\n", " global conversation_history, pre_conversation_history, stage_history, answer_token\n", " answer_token = ''\n", " conversation_history, pre_conversation_history, stage_history = \"\"\"\"\"\", \"\"\"\"\"\", \"\"\n", "\n", "\n", " clear_btn.click(fn=clear)\n", "\n", " submit_btn.click(answer, [msg, chatbot], [msg, chatbot])\n", " msg.submit(answer, [msg, chatbot], [msg, chatbot])" ] }, { "cell_type": "code", "execution_count": 26, "id": "c76b17f3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "Running on public URL: https://4e48650c0f3c78a155.gradio.live\n", "\n", "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "demo.launch(share=True)" ] } ], "metadata": { "kernelspec": { "display_name": "nemo", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" } }, "nbformat": 4, "nbformat_minor": 5 }