isayahc committed on
Commit
aab5640
1 Parent(s): c5cb439

added new chain

Browse files
.gitignore CHANGED
@@ -164,3 +164,6 @@ cython_debug/
164
  *.sqlite3
165
  *.bin
166
  *.pickle
 
 
 
 
164
  *.sqlite3
165
  *.bin
166
  *.pickle
167
+
168
+
169
+ *.db
app_gui.py CHANGED
@@ -1,92 +1,107 @@
1
  # Import Gradio for UI, along with other necessary libraries
2
  import gradio as gr
 
 
 
 
 
 
 
3
  from rag_app.agents.react_agent import agent_executor
4
- from config import db
5
  # need to import the qa!
6
- db.create_new_session()
7
 
8
 
9
- def add_text(history, text):
10
- """Function to add a new input to the chat history
11
-
12
- Return: return_description
13
- """
14
-
15
- # Append the new text to the history with a placeholder for the response
16
- history = history + [(text, None)]
17
- return history, ""
18
-
19
 
20
- def bot(history):
21
- """Function representing the bot's response mechanism
22
-
23
- """
24
-
25
- # Obtain the response from the 'infer' function using the latest input
26
- response = infer(history[-1][0], history)
27
- #sources = [doc.metadata.get("source") for doc in response['source_documents']]
28
- #src_list = '\n'.join(sources)
29
- #print_this = response['result'] + "\n\n\n Sources: \n\n\n" + src_list
30
 
 
 
 
 
 
 
 
 
31
 
32
- #history[-1][1] = print_this #response['answer']
33
- # Update the history with the bot's response
34
- history[-1][1] = response['output']
35
- return history
36
 
 
 
 
 
 
37
 
38
- def infer(question, history):
39
- """Function to infer the response using the RAG model
40
-
41
- """
42
-
43
- # Use the question and history to query the RAG model
44
- try:
45
- result = agent_executor.invoke(
46
- {
47
- "input": question,
48
- "chat_history": history
49
- }
50
- )
51
- return result
52
- except Exception:
53
- raise gr.Error("Model is Overloaded, Please retry later!")
54
-
55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
- # CSS styling for the Gradio interface
58
- css = """
59
- #col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
60
- """
61
 
62
- # HTML content for the Gradio interface title
63
- title = """
64
- <div style="text-align:left;">
65
- <p>Hello, I BotTina 2.0, your intelligent AI assistant. I can help you explore Wuerttembergische Versicherungs products.<br />
66
- </div>
67
- """
68
 
69
- # Building the Gradio interface
70
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
71
- with gr.Column(elem_id="col-container"):
72
- gr.HTML(title) # Add the HTML title to the interface
73
- chatbot = gr.Chatbot([], elem_id="chatbot",
74
- label="BotTina 2.0",
75
- bubble_full_width=False,
76
- avatar_images=(None, "https://dacodi-production.s3.amazonaws.com/store/87bc00b6727589462954f2e3ff6f531c.png"),
77
- height=680,) # Initialize the chatbot component
78
- clear = gr.Button("Clear") # Add a button to clear the chat
 
79
 
80
- # Create a row for the question input
81
- with gr.Row():
82
- question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
83
 
84
- # Define the action when the question is submitted
85
- question.submit(add_text, [chatbot, question], [chatbot, question], queue=False).then(
86
- bot, chatbot, chatbot
87
- )
88
- # Define the action for the clear button
89
- clear.click(lambda: None, None, chatbot, queue=False)
90
 
91
- # Launch the Gradio demo interface
92
- demo.launch(share=False, debug=True)
 
1
  # Import Gradio for UI, along with other necessary libraries
2
  import gradio as gr
3
+ from fastapi import FastAPI
4
+ from rag_app.agents.react_agent import agent_executor, llm
5
+ from rag_app.chains import user_response_sentiment_prompt
6
+ from typing import Dict
7
+ import re
8
+ from rag_app.utils.utils import extract_responses
9
+ from rag_app.loading_data.load_S3_vector_stores import get_chroma_vs
10
  from rag_app.agents.react_agent import agent_executor
 
11
  # need to import the qa!
 
12
 
13
 
 
 
 
 
 
 
 
 
 
 
14
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# --- Application setup -------------------------------------------------------
app = FastAPI()

# Make sure the Chroma vector store has been fetched before serving requests.
get_chroma_vs()

# Chain that classifies whether a user's reply signals satisfaction ("1") or not ("0").
# NOTE(review): the prompt's template variable is spelled "user_reponse" (sic);
# the invoke key below must match it exactly — rename both together if fixing the typo.
user_sentiment_chain = user_response_sentiment_prompt | llm


if __name__ == "__main__":
    # Smoke-test the sentiment chain only when executed as a script, so that
    # importing this module does not trigger an LLM call at import time.
    # (The committed version left this `if` with no body — a syntax error —
    # and ran the invoke unconditionally at module import.)
    data = user_sentiment_chain.invoke({"user_reponse": "OMG I AM SO LOST!!! HELP!!!"})
    responses = extract_responses(data)
    if responses['AI'] == "1":
        print("GG")
 
 
 
26
 
27
# Append the user's new message to the chat history.
def add_text(history, text):
    """Add *text* to *history* as a (user, pending-bot) pair.

    Returns the extended history plus an empty string that clears the
    input textbox in the UI.
    """
    return history + [(text, None)], ""
32
 
33
# Produce the assistant's reply for the most recent user message.
def bot(history):
    """Fill in the pending bot slot of the last (user, bot) pair.

    Queries the RAG agent via infer() with the latest question and the
    full history, stores the agent's 'output' text, and returns history.
    """
    latest_question = history[-1][0]
    result = infer(latest_question, history)
    history[-1][1] = result['output']
    return history
 
 
 
 
 
 
 
 
 
 
 
39
 
40
# Function to infer the response using the RAG model
def infer(question, history):
    """Query the RAG agent with the user's question and chat history.

    Args:
        question: The latest user message (string).
        history: List of (user, bot) message pairs accumulated by the UI.

    Returns:
        The agent_executor result dict; callers read its 'output' key.

    Raises:
        Whatever agent_executor.invoke raises — errors propagate unchanged
        so the Gradio UI surfaces them. (The previous
        ``except Exception as e: raise e`` wrapper had no effect and has
        been removed, along with commented-out dead code.)
    """
    return agent_executor.invoke(
        {
            "input": question,
            "chat_history": history,
        }
    )
64
+
65
def vote(data: gr.LikeData):
    """Log whether the user up- or down-voted a chatbot response."""
    message = (
        "You upvoted this response: "
        if data.liked
        else "You downvoted this response: "
    )
    print(message)
70
 
71
# CSS styling for the Gradio interface: center the main column, cap width at 700px.
css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""

# HTML content for the Gradio interface title banner.
title = """
<div style="text-align:left;">
<p>Hello, I BotTina 2.0, your intelligent AI assistant. I can help you explore Wuerttembergische Versicherungs products.<br />
</div>
"""

# Building the Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)  # Add the HTML title to the interface
        chatbot = gr.Chatbot([], elem_id="chatbot",
                             label="BotTina 2.0",
                             bubble_full_width=False,
                             avatar_images=(None, "https://dacodi-production.s3.amazonaws.com/store/87bc00b6727589462954f2e3ff6f531c.png"),
                             height=680,)  # Initialize the chatbot component
        chatbot.like(vote, None, None)  # Wire like/dislike buttons to vote()
        clear = gr.Button("Clear")  # Add a button to clear the chat

        # Create a row for the question input
        with gr.Row():
            question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")

        # Define the action when the question is submitted:
        # add_text appends the message, then bot() fills in the agent's reply.
        question.submit(add_text, [chatbot, question], [chatbot, question], queue=False).then(
            bot, chatbot, chatbot
        )
        # Define the action for the clear button (reset the chatbot state)
        clear.click(lambda: None, None, chatbot, queue=False)

# Launch the Gradio demo interface; queue() enables request queuing for
# concurrent users before launching locally (share=False).
demo.queue().launch(share=False, debug=True)
rag_app/chains/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
# Re-export the chain prompts at package level so callers can write
# `from rag_app.chains import user_response_sentiment_prompt` (as app_gui.py
# does) — the committed stub comment left that import broken.
from rag_app.chains.user_response_sentiment_chain import user_response_sentiment_prompt
rag_app/chains/user_response_sentiment_chain.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_core.prompts import PromptTemplate


# Prompt that classifies a user's reply to the agent as satisfied ("1") or
# still needing help ("0").
# NOTE(review): the template variable is spelled "user_reponse" (sic); callers
# invoke the chain with exactly that key, so renaming requires updating them too.
# Fixes vs. the committed text: doubled "has has", the instruction never said
# which digit means what, and the "Example N" labels trailed their examples
# (leaving "Example 4" empty).
user_response_sentiment_template = """
You will be given a user response to an agent.
=================
{user_reponse}
====================
You must determine if the user has had their questions answered.
Respond with "1" if the user seems satisfied, otherwise "0".
Reply with that single character ONLY.

Examples:
================

Example 1
USER: Great Work!
YOUR RESPONSE: 1
=================

Example 2
USER: I still need help!
YOUR RESPONSE: 0
================================

Example 3
USER: I don't understand what you mean
YOUR RESPONSE: 0
================================

Example 4
USER: That makes sense!
YOUR RESPONSE: 1
================================
"""

user_response_sentiment_prompt = PromptTemplate.from_template(user_response_sentiment_template)

# llm_chain = prompt | llm  (composed by the caller, e.g. app_gui.py)