add try/except error handling
app.py
CHANGED
@@ -237,73 +237,75 @@ def main():
     embeddings = load_embeddings()
     sp_docs = split_docs(documents = data)
     st.write(f"This document have {len(sp_docs)} chunks")
-    st.write(sp_docs)
     sp_docs_list.extend(sp_docs)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try :
+        db = FAISS.from_documents(sp_docs_list, embeddings)
+        memory = ConversationBufferMemory(memory_key="chat_history",
+                                          return_messages=True,
+                                          input_key="query",
+                                          output_key="result")
+        qa_chain = RetrievalQA.from_chain_type(
+            llm = llm,
+            chain_type = "stuff",
+            retriever = db.as_retriever(search_kwargs = {'k':3}),
+            return_source_documents = True,
+            memory = memory,
+            chain_type_kwargs = {"prompt":qa_prompt})
 
-    # qa_chain = ConversationalRetrievalChain(
-    # retriever =db.as_retriever(search_kwargs={'k':2}),
-    # question_generator=question_generator,
-    # #condense_question_prompt=CONDENSE_QUESTION_PROMPT,
-    # combine_docs_chain=doc_chain,
-    # return_source_documents=True,
-    # memory = memory,
-    # #get_chat_history=lambda h :h
-    # )
-
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
-
-    # Accept user input
-    if query := st.chat_input("What is up?"):
-        # Display user message in chat message container
-        with st.chat_message("user"):
-            st.markdown(query)
-        # Add user message to chat history
-        st.session_state.messages.append({"role": "user", "content": query})
-
-        start = time.time()
-
-        response = qa_chain({'query': query})
-
-        #url_list = set([i.metadata['page'] for i in response['source_documents']])
-        #print(f"condensed quesion : {question_generator.run({'chat_history': response['chat_history'], 'question' : query})}")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # qa_chain = ConversationalRetrievalChain(
+        # retriever =db.as_retriever(search_kwargs={'k':2}),
+        # question_generator=question_generator,
+        # #condense_question_prompt=CONDENSE_QUESTION_PROMPT,
+        # combine_docs_chain=doc_chain,
+        # return_source_documents=True,
+        # memory = memory,
+        # #get_chat_history=lambda h :h
+        # )
+
+        for message in st.session_state.messages:
+            with st.chat_message(message["role"]):
+                st.markdown(message["content"])
+
+        # Accept user input
+        if query := st.chat_input("What is up?"):
+            # Display user message in chat message container
+            with st.chat_message("user"):
+                st.markdown(query)
+            # Add user message to chat history
+            st.session_state.messages.append({"role": "user", "content": query})
+
+            start = time.time()
+
+            response = qa_chain({'query': query})
+
+            #url_list = set([i.metadata['page'] for i in response['source_documents']])
+            #print(f"condensed quesion : {question_generator.run({'chat_history': response['chat_history'], 'question' : query})}")
 
+            with st.chat_message("assistant"):
+                st.markdown(response['result'])
+
+            end = time.time()
+            st.write("Respone time:",int(end-start),"sec")
+            print(response)
+
+            # Add assistant response to chat history
+            st.session_state.messages.append({"role": "assistant", "content": response['result']})
+
+            with st.expander("See the related documents"):
+                for count, url in enumerate(response['source_documents']):
+                    #url_reg = regex_source(url)
+                    st.write(str(count+1)+":", url)
+
+        clear_button = st.button("Start new convo")
+        if clear_button :
+            st.session_state.messages = []
+            qa_chain.memory.chat_memory.clear()
+
+    except :
+        st.write("Plaese upload your pdf file.")
+
 
 if __name__ == '__main__':
     main()
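One caveat in the new guard: the bare "except :" catches every exception, including SystemExit and KeyboardInterrupt, so any failure inside the block (a bad prompt, a model error) is reported as a missing upload. Below is a minimal sketch of the same pattern with a narrower except clause; build_qa_chain and the module-level sp_docs_list here are hypothetical stand-ins for the FAISS/RetrievalQA setup in the diff, not part of the commit.

import streamlit as st

sp_docs_list = []  # would be populated by the upload/split step shown above

def build_qa_chain(docs):
    # Hypothetical stand-in for FAISS.from_documents(...) followed by
    # RetrievalQA.from_chain_type(...); fails loudly when nothing was uploaded.
    if not docs:
        raise ValueError("no documents uploaded")
    return lambda inputs: {"result": "..."}  # placeholder chain object

try:
    qa_chain = build_qa_chain(sp_docs_list)
except Exception as err:  # narrower than a bare except:
    # Same user-facing hint, but the underlying error is surfaced
    # instead of being silently swallowed.
    st.write("Please upload your PDF file.")
    st.write(f"Details: {err}")

The behavior for the missing-upload case is unchanged; the difference shows up when something else inside the block fails, which would otherwise be masked by the upload message.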