Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import streamlit as st
|
2 |
from PIL import Image
|
|
|
3 |
import time
|
4 |
import streamlit_analytics
|
5 |
from dotenv import load_dotenv
|
@@ -87,13 +88,52 @@ def load_chatbot():
|
|
87 |
return load_qa_chain(llm=OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff")
|
88 |
|
89 |
|
90 |
-
|
91 |
def display_chat_history(chat_history):
    """Render every entry of *chat_history* as a rounded, highlighted bubble.

    Each entry is expected to be indexable with at least three items:
    (speaker, text, status) — e.g. ("User", "Hallo", "new").
    """
    for entry in chat_history:
        # NOTE(review): all three branches resolve to the same colour, so the
        # bubble colour is effectively constant; kept as-is to match behaviour.
        if entry[2] == "new":
            bubble_colour = "#ffeecf"
        elif entry[0] == "User":
            bubble_colour = "#ffeecf"
        else:
            bubble_colour = "#ffeecf"
        bubble_html = (
            f"<div style='background-color: {bubble_colour}; padding: 10px; "
            f"border-radius: 10px; margin: 10px;'>{entry[0]}: {entry[1]}</div>"
        )
        st.markdown(bubble_html, unsafe_allow_html=True)
|
95 |
|
96 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
|
98 |
|
99 |
def page1():
|
@@ -166,11 +206,12 @@ def page1():
|
|
166 |
start_time = time.time()
|
167 |
|
168 |
with st.spinner('Bot is thinking...'):
|
169 |
-
# Use the VectorStore loaded at the start from the session state
|
170 |
chain = load_chatbot()
|
171 |
docs = VectorStore.similarity_search(query=query, k=3)
|
172 |
with get_openai_callback() as cb:
|
173 |
response = chain.run(input_documents=docs, question=query)
|
|
|
|
|
174 |
|
175 |
|
176 |
# Stop timing
|
@@ -278,11 +319,12 @@ def page2():
|
|
278 |
start_time = time.time()
|
279 |
|
280 |
with st.spinner('Bot is thinking...'):
|
281 |
-
# Use the VectorStore loaded at the start from the session state
|
282 |
chain = load_chatbot()
|
283 |
docs = VectorStore.similarity_search(query=query, k=3)
|
284 |
with get_openai_callback() as cb:
|
285 |
response = chain.run(input_documents=docs, question=query)
|
|
|
|
|
286 |
|
287 |
|
288 |
# Stop timing
|
@@ -336,4 +378,4 @@ def main():
|
|
336 |
|
337 |
|
338 |
if __name__ == "__main__":
|
339 |
-
main()
|
|
|
1 |
import streamlit as st
|
2 |
from PIL import Image
|
3 |
+
import random
|
4 |
import time
|
5 |
import streamlit_analytics
|
6 |
from dotenv import load_dotenv
|
|
|
88 |
return load_qa_chain(llm=OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff")
|
89 |
|
90 |
|
|
|
91 |
def display_chat_history(chat_history):
    """Render chat history entries as styled chat bubbles.

    Parameters:
        chat_history: iterable of indexable triples (speaker, message, status),
            e.g. ("User", "Hallo", "new").
    """
    import html  # local import: stdlib, used only to escape user-provided text

    for chat in chat_history:
        # NOTE(review): all three branches of this conditional choose the same
        # colour ("#ffeecf"); the intent was probably distinct colours per
        # status/speaker — confirm with the author. Behaviour preserved.
        background_color = "#ffeecf" if chat[2] == "new" else "#ffeecf" if chat[0] == "User" else "#ffeecf"
        # Security fix: chat text originates from user input / model output and
        # is rendered with unsafe_allow_html=True, so it must be HTML-escaped
        # to prevent HTML/JS injection into the page.
        speaker = html.escape(str(chat[0]))
        message = html.escape(str(chat[1]))
        st.markdown(
            f"<div style='background-color: {background_color}; padding: 10px; "
            f"border-radius: 10px; margin: 10px;'>{speaker}: {message}</div>",
            unsafe_allow_html=True,
        )
|
95 |
|
96 |
|
97 |
+
def handle_no_answer(response):
    """Replace a "don't know"-style model answer with a friendlier follow-up.

    The check is case-insensitive: *response* is lowercased once and searched
    for any of a fixed list of German/English "no answer" phrases. On a match
    a randomly chosen alternative prompt is returned; otherwise *response* is
    returned unchanged.

    Parameters:
        response: str — the raw chatbot answer.

    Returns:
        str — either the original response or one of the alternative prompts.
    """
    # Every phrase here must be lowercase, because the comparison below only
    # lowercases the response. (Bug fix: the original list contained the
    # capitalised entry "Leider kann ich diese Frage nicht beantworten",
    # which could therefore never match.)
    no_answer_phrases = [
        "ich weiß es nicht",
        "ich weiß nicht",
        "ich bin mir nicht sicher",
        "es wird nicht erwähnt",
        "leider kann ich diese frage nicht beantworten",
        "kann ich diese frage nicht beantworten",
        "ich kann diese frage nicht beantworten",
        "ich kann diese frage leider nicht beantworten",
        "keine information",
        "das ist unklar",
        "da habe ich keine antwort",
        "das kann ich nicht beantworten",
        "i don't know",
        "i am not sure",
        "it is not mentioned",
        "no information",
        "that is unclear",
        "i have no answer",
        "i cannot answer that",
        "unable to provide an answer",
        "not enough context",
    ]

    alternative_responses = [
        "Hmm, das ist eine knifflige Frage. Lass uns das gemeinsam erkunden. Kannst du mehr Details geben?",
        "Interessante Frage! Ich bin mir nicht sicher, aber wir können es herausfinden. Hast du weitere Informationen?",
        "Das ist eine gute Frage. Ich habe momentan keine Antwort darauf, aber vielleicht kannst du sie anders formulieren?",
        "Da bin ich überfragt. Kannst du die Frage anders stellen oder mir mehr Kontext geben?",
        "Ich stehe hier etwas auf dem Schlauch. Gibt es noch andere Aspekte der Frage, die wir betrachten könnten?",
        # Add more alternative responses as needed
    ]

    # Lowercase once instead of once per phrase.
    lowered = response.lower()
    if any(phrase in lowered for phrase in no_answer_phrases):
        return random.choice(alternative_responses)  # Randomly select a response
    return response
|
135 |
+
|
136 |
+
|
137 |
|
138 |
|
139 |
def page1():
|
|
|
206 |
start_time = time.time()
|
207 |
|
208 |
with st.spinner('Bot is thinking...'):
|
|
|
209 |
chain = load_chatbot()
|
210 |
docs = VectorStore.similarity_search(query=query, k=3)
|
211 |
with get_openai_callback() as cb:
|
212 |
response = chain.run(input_documents=docs, question=query)
|
213 |
+
response = handle_no_answer(response) # Process the response through the new function
|
214 |
+
|
215 |
|
216 |
|
217 |
# Stop timing
|
|
|
319 |
start_time = time.time()
|
320 |
|
321 |
with st.spinner('Bot is thinking...'):
|
|
|
322 |
chain = load_chatbot()
|
323 |
docs = VectorStore.similarity_search(query=query, k=3)
|
324 |
with get_openai_callback() as cb:
|
325 |
response = chain.run(input_documents=docs, question=query)
|
326 |
+
response = handle_no_answer(response) # Process the response through the new function
|
327 |
+
|
328 |
|
329 |
|
330 |
# Stop timing
|
|
|
378 |
|
379 |
|
380 |
if __name__ == "__main__":
|
381 |
+
main()
|