sabazo commited on
Commit
d398a6a
2 Parent(s): 42c70fb 5a59a40

Merge pull request #21 from almutareb/add_gradio_examples

Browse files
Files changed (2) hide show
  1. app_gui.py +37 -9
  2. rag_app/agents/react_agent.py +8 -2
app_gui.py CHANGED
@@ -18,6 +18,7 @@ if __name__ == "__main__":
18
  def bot(history):
19
  # Obtain the response from the 'infer' function using the latest input
20
  response = infer(history[-1][0], history)
 
21
  history[-1][1] = response['output']
22
  return history
23
 
@@ -34,7 +35,7 @@ if __name__ == "__main__":
34
  )
35
  return result
36
  except Exception:
37
- raise gr.Error("Model is Overloaded, Please retry later!")
38
 
39
  def vote(data: gr.LikeData):
40
  if data.liked:
@@ -42,9 +43,14 @@ if __name__ == "__main__":
42
  else:
43
  print("You downvoted this response: ")
44
 
 
 
 
 
 
45
  # CSS styling for the Gradio interface
46
  css = """
47
- #col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
48
  """
49
 
50
  # HTML content for the Gradio interface title
@@ -53,27 +59,49 @@ if __name__ == "__main__":
53
  <p>Hello, I am BotTina 2.0, your intelligent AI assistant. I can help you explore Wuerttembergische Versicherungs products.<br />
54
  </div>
55
  """
 
 
 
 
 
 
 
 
 
 
56
 
57
  # Building the Gradio interface
58
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
59
  with gr.Column(elem_id="col-container"):
60
- gr.HTML(title) # Add the HTML title to the interface
61
  chatbot = gr.Chatbot([], elem_id="chatbot",
62
- label="BotTina 2.0",
63
  bubble_full_width=False,
64
  avatar_images=(None, "https://dacodi-production.s3.amazonaws.com/store/87bc00b6727589462954f2e3ff6f531c.png"),
65
  height=680,) # Initialize the chatbot component
66
  chatbot.like(vote, None, None)
67
- clear = gr.Button("Clear") # Add a button to clear the chat
68
 
69
  # Create a row for the question input
70
  with gr.Row():
71
- question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  # Define the action when the question is submitted
74
  question.submit(add_text, [chatbot, question], [chatbot, question], queue=False).then(
75
- bot, chatbot, chatbot
76
- )
 
77
  # Define the action for the clear button
78
  clear.click(lambda: None, None, chatbot, queue=False)
79
 
 
18
  def bot(history):
19
  # Obtain the response from the 'infer' function using the latest input
20
  response = infer(history[-1][0], history)
21
+ print(response)
22
  history[-1][1] = response['output']
23
  return history
24
 
 
35
  )
36
  return result
37
  except Exception:
38
+ raise gr.Warning("Model is Overloaded, please try again in a few minutes!")
39
 
40
  def vote(data: gr.LikeData):
41
  if data.liked:
 
43
  else:
44
  print("You downvoted this response: ")
45
 
46
+ def get_examples(input_text: str):
47
+ tmp_history = [(input_text, None)]
48
+ response = infer(input_text, tmp_history)
49
+ return response['output']
50
+
51
  # CSS styling for the Gradio interface
52
  css = """
53
+ #col-container {max-width: 1200px; margin-left: auto; margin-right: auto;}
54
  """
55
 
56
  # HTML content for the Gradio interface title
 
59
  <p>Hello, I am BotTina 2.0, your intelligent AI assistant. I can help you explore Wuerttembergische Versicherungs products.<br />
60
  </div>
61
  """
62
+ head_style = """
63
+ <style>
64
+ @media (min-width: 1536px)
65
+ {
66
+ .gradio-container {
67
+ min-width: var(--size-full) !important;
68
+ }
69
+ }
70
+ </style>
71
+ """
72
 
73
  # Building the Gradio interface
74
+ with gr.Blocks(theme=gr.themes.Soft(), title="InsurePal AI 🤵🏻‍♂️", head=head_style) as demo:
75
  with gr.Column(elem_id="col-container"):
76
+ gr.HTML() # Add the HTML title to the interface
77
  chatbot = gr.Chatbot([], elem_id="chatbot",
78
+ label="InsurePal AI",
79
  bubble_full_width=False,
80
  avatar_images=(None, "https://dacodi-production.s3.amazonaws.com/store/87bc00b6727589462954f2e3ff6f531c.png"),
81
  height=680,) # Initialize the chatbot component
82
  chatbot.like(vote, None, None)
 
83
 
84
  # Create a row for the question input
85
  with gr.Row():
86
+ question = gr.Textbox(label="Question", show_label=False, placeholder="Type your question and hit Enter ", scale=4)
87
+ send_btn = gr.Button(value="Send", variant="primary", scale=0)
88
+ with gr.Accordion(label="Beispiele", open=False):
89
+ #examples
90
+ examples = gr.Examples([
91
+ "Welche Versicherungen brauche ich als Student?",
92
+ "Wie melde ich einen Schaden?",
93
+ "Wie kann ich mich als Selbstständiger finanziell absichern?",
94
+ "Welche Versicherungen sollte ich für meine Vorsorge abschliessen?"
95
+ ], inputs=[question], label="") #, cache_examples="lazy", fn=get_examples, outputs=[chatbot]
96
+
97
+ with gr.Row():
98
+ clear = gr.Button("Clear") # Add a button to clear the chat
99
 
100
  # Define the action when the question is submitted
101
  question.submit(add_text, [chatbot, question], [chatbot, question], queue=False).then(
102
+ bot, chatbot, chatbot)
103
+ send_btn.click(add_text, [chatbot, question], [chatbot, question], queue=False).then(
104
+ bot, chatbot, chatbot)
105
  # Define the action for the clear button
106
  clear.click(lambda: None, None, chatbot, queue=False)
107
 
rag_app/agents/react_agent.py CHANGED
@@ -7,6 +7,10 @@ from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
7
  from langchain.tools.render import render_text_description
8
  import os
9
  from dotenv import load_dotenv
 
 
 
 
10
  from rag_app.structured_tools.structured_tools import (
11
  google_search, knowledgeBase_search
12
  )
@@ -22,6 +26,8 @@ GOOGLE_CSE_ID = os.getenv('GOOGLE_CSE_ID')
22
  GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
23
  LLM_MODEL = os.getenv('LLM_MODEL')
24
 
 
 
25
  # Load the model from the Hugging Face Hub
26
  llm = HuggingFaceEndpoint(repo_id=LLM_MODEL,
27
  temperature=0.1,
@@ -65,8 +71,8 @@ agent_executor = AgentExecutor(
65
  agent=agent,
66
  tools=tools,
67
  verbose=True,
68
- max_iterations=10, # cap number of iterations
69
- #max_execution_time=60, # timout at 60 sec
70
  return_intermediate_steps=True,
71
  handle_parsing_errors=True,
72
  )
 
7
  from langchain.tools.render import render_text_description
8
  import os
9
  from dotenv import load_dotenv
10
+ # local cache
11
+ from langchain.globals import set_llm_cache
12
+ from langchain.cache import SQLiteCache # sqlite
13
+ #from langchain.cache import InMemoryCache # in memory cache
14
  from rag_app.structured_tools.structured_tools import (
15
  google_search, knowledgeBase_search
16
  )
 
26
  GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
27
  LLM_MODEL = os.getenv('LLM_MODEL')
28
 
29
+ set_llm_cache(SQLiteCache(database_path=".cache.db"))
30
+
31
  # Load the model from the Hugging Face Hub
32
  llm = HuggingFaceEndpoint(repo_id=LLM_MODEL,
33
  temperature=0.1,
 
71
  agent=agent,
72
  tools=tools,
73
  verbose=True,
74
+ max_iterations=20, # cap number of iterations
75
+ max_execution_time=90, # timeout at 90 sec
76
  return_intermediate_steps=True,
77
  handle_parsing_errors=True,
78
  )