sabazo committed on
Commit
2454aa9
2 Parent(s): c285f2f b4b75a1

Merge pull request #15 from almutareb/add-answer-qa-chain

Browse files
.gitignore CHANGED
@@ -164,3 +164,8 @@ cython_debug/
164
  *.sqlite3
165
  *.bin
166
  *.pickle
 
 
 
 
 
 
164
  *.sqlite3
165
  *.bin
166
  *.pickle
167
+
168
+
169
+ # Databases
170
+
171
+ *.db
app_gui.py CHANGED
@@ -1,10 +1,13 @@
1
  # Import Gradio for UI, along with other necessary libraries
2
  import gradio as gr
3
  from fastapi import FastAPI
4
- from rag_app.agents.react_agent import agent_executor
 
5
  # need to import the qa!
6
 
7
  app = FastAPI()
 
 
8
 
9
  if __name__ == "__main__":
10
 
@@ -32,6 +35,10 @@ if __name__ == "__main__":
32
  def infer(question, history):
33
  # Use the question and history to query the RAG model
34
  #result = qa({"query": question, "history": history, "question": question})
 
 
 
 
35
  try:
36
  result = agent_executor.invoke(
37
  {
@@ -39,6 +46,8 @@ if __name__ == "__main__":
39
  "chat_history": history
40
  }
41
  )
 
 
42
  return result
43
  except Exception:
44
  raise gr.Error("Model is Overloaded, Please retry later!")
 
1
  # Import Gradio for UI, along with other necessary libraries
2
  import gradio as gr
3
  from fastapi import FastAPI
4
+ from rag_app.agents.react_agent import agent_executor, llm
5
+ from rag_app.chains import user_response_sentiment_prompt
6
  # need to import the qa!
7
 
8
  app = FastAPI()
9
+ user_sentiment_chain = user_response_sentiment_prompt | llm
10
+
11
 
12
  if __name__ == "__main__":
13
 
 
35
  def infer(question, history):
36
  # Use the question and history to query the RAG model
37
  #result = qa({"query": question, "history": history, "question": question})
38
+ try:
39
+ data = user_sentiment_chain.invoke({"user_reponse":question})
40
+ except Exception as e:
41
+ raise e
42
  try:
43
  result = agent_executor.invoke(
44
  {
 
46
  "chat_history": history
47
  }
48
  )
49
+
50
+
51
  return result
52
  except Exception:
53
  raise gr.Error("Model is Overloaded, Please retry later!")
rag_app/chains/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from rag_app.chains.user_response_sentiment_chain import user_response_sentiment_prompt
rag_app/chains/user_response_sentiment_chain.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_core.prompts import PromptTemplate


# Few-shot classification prompt: given a user's reply to the agent, the
# model must decide whether the user's question has been answered and
# respond with "1" (satisfied) or "0" (still needs help) — nothing else.
#
# NOTE(review): the template input variable is spelled "user_reponse" (sic).
# It is kept misspelled on purpose: app_gui.py invokes the chain with
# {"user_reponse": question}, and renaming the variable here alone would
# break that caller.
user_response_sentiment_template = """
You will be given a user response to an agent.
=================
{user_reponse}
====================
You must determine whether the user has had their question answered.
If the user seems satisfied, respond with "1"; otherwise respond with "0".
Respond with "1" or "0" ONLY.

Examples:
================
Example 1
USER: Great Work!
YOUR RESPONSE: 1
=================
Example 2
USER: I still need help!
YOUR RESPONSE: 0
================================
Example 3
USER: I don't understand what you mean
YOUR RESPONSE: 0
================================
Example 4
USER: That makes sense!
YOUR RESPONSE: 1
================================
"""

# Prompt object consumed by callers (composed there as: prompt | llm).
user_response_sentiment_prompt = PromptTemplate.from_template(user_response_sentiment_template)