datascientist22 committed on
Commit
7ff270d
1 Parent(s): d9e1771

Update app.py

Files changed (1):
  app.py  +36 -28
app.py CHANGED
@@ -64,34 +64,42 @@ def extract_text_from_pdfs(files):
     return text
 
 # Handle the query submission
-if submit_button and query:
-    # Extract text from uploaded PDFs
-    if uploaded_files:
-        pdf_text = extract_text_from_pdfs(uploaded_files)
-
-        # Prepare the input prompt
-        prompt = f"""
-        Based on the following context/document:
-        {pdf_text}
-        Please answer the question: {query}
-        """
-
-        # Encode the input text
-        input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
-
-        # Generate the response
-        outputs = model.generate(
-            input_ids=input_ids,
-            max_new_tokens=500,
-            no_repeat_ngram_size=5,
-        )
-
-        # Decode the response and clean it
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        clean_response = response.strip()
-
-        # Update chat history
-        st.session_state.chat_history.append((query, clean_response))
+if submit_button:
+    if not query:
+        st.warning("⚠️ Please enter a query before submitting.")
+    elif not uploaded_files:
+        st.warning("⚠️ Please upload at least one PDF file before submitting.")
+    else:
+        try:
+            # Extract text from uploaded PDFs
+            pdf_text = extract_text_from_pdfs(uploaded_files)
+
+            # Prepare the input prompt
+            prompt = f"""
+            Based on the following context/document:
+            {pdf_text}
+            Please answer the question: {query}
+            """
+
+            # Encode the input text
+            input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
+
+            # Generate the response
+            outputs = model.generate(
+                input_ids=input_ids,
+                max_new_tokens=500,
+                no_repeat_ngram_size=5,
+            )
+
+            # Decode the response and clean it
+            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+            clean_response = response.strip()
+
+            # Update chat history
+            st.session_state.chat_history.append((query, clean_response))
+
+        except Exception as e:
+            st.error(f"An error occurred: {e}")
 
 # Display chat history
 if st.session_state.chat_history:
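
For reference, the hunk above depends on objects defined earlier in app.py that this commit does not change: tokenizer, model, device, the Streamlit widgets (uploaded_files, query, submit_button), st.session_state.chat_history, and the extract_text_from_pdfs helper. The following is a minimal sketch of what that setup might look like; the model checkpoint, the PyPDF2-based extraction, and the widget labels are illustrative assumptions, not taken from the actual file.

# A minimal sketch of the surrounding setup this hunk assumes.
# NOTE: the model checkpoint, PyPDF2 usage, and widget labels are
# illustrative guesses; only the variable names come from the diff.
import streamlit as st
import torch
from PyPDF2 import PdfReader
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")  # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base").to(device)

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

uploaded_files = st.file_uploader("Upload PDF(s)", type="pdf", accept_multiple_files=True)
query = st.text_input("Enter your question")
submit_button = st.button("Submit")

def extract_text_from_pdfs(files):
    # Concatenate the text of every page of every uploaded PDF
    text = ""
    for file in files:
        reader = PdfReader(file)
        for page in reader.pages:
            text += page.extract_text() or ""
    return text

Read against a setup like this, the intent of the commit is clear: instead of silently doing nothing when the query or the uploads are missing, the new code warns the user, and any failure during extraction or generation is reported via st.error rather than crashing the app.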