capradeepgujaran committed on
Commit
32531dc
•
1 Parent(s): 1ded6a7

Create app.py

Files changed (1)
  1. app.py +235 -0
app.py ADDED
@@ -0,0 +1,235 @@
+ import os
+ import logging
+
+ import cv2
+ import numpy as np
+ from PIL import Image
+ import pytesseract
+ import gradio as gr
+ from pdf2image import convert_from_path
+ import PyPDF2
+ from llama_index.core import VectorStoreIndex, Document, get_response_synthesizer
+ from llama_index.embeddings.openai import OpenAIEmbedding
+ from llama_index.llms.openai import OpenAI
+ from dotenv import load_dotenv
+ from sentence_transformers import SentenceTransformer, util
+ from openai_tts_tool import generate_audio_and_text  # local TTS helper module
+
+ # Set up logging configuration
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ # Module-level state shared across the Gradio callbacks: the vector index
+ # is (re)built on upload and read by the query handler.
+ vector_index = None
+ query_log = []
+ sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ # OCR languages available to the local Tesseract install; drop the
+ # "List of available languages" header line and the trailing blank entry.
+ langs = os.popen('tesseract --list-langs').read().split('\n')[1:-1]
+
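+ # Preprocess an image before OCR: grayscale, histogram equalization,
+ # Gaussian blur, then adaptive thresholding. This cleanup chain generally
+ # improves Tesseract accuracy on photos and unevenly lit scans.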
+ def preprocess_image(image_path):
+     img = cv2.imread(image_path)
+     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     gray = cv2.equalizeHist(gray)
+     gray = cv2.GaussianBlur(gray, (5, 5), 0)
+     processed_image = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                             cv2.THRESH_BINARY, 11, 2)
+     temp_filename = "processed_image.png"
+     cv2.imwrite(temp_filename, processed_image)
+     return temp_filename
+
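+ # Run Tesseract on the preprocessed image; lang must be one of the codes
+ # reported by `tesseract --list-langs` (see the langs global above).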
+ def extract_text_from_image(image_path, lang='eng'):
+     processed_image_path = preprocess_image(image_path)
+     text = pytesseract.image_to_string(Image.open(processed_image_path), lang=lang)
+     return text
+
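+ # Extract text from a PDF page by page: use the embedded text layer when
+ # present; otherwise rasterize just that page with pdf2image and fall back
+ # to OCR, tagging the output so image-heavy pages are identifiable later.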
+ def extract_text_from_pdf(pdf_path, lang='eng'):
+     text = ""
+     try:
+         with open(pdf_path, 'rb') as file:
+             pdf_reader = PyPDF2.PdfReader(file)
+             for page_num in range(len(pdf_reader.pages)):
+                 page = pdf_reader.pages[page_num]
+                 page_text = page.extract_text()
+                 if page_text and page_text.strip():
+                     text += page_text
+                 else:
+                     # pdf2image page numbers are 1-indexed
+                     images = convert_from_path(pdf_path, first_page=page_num + 1, last_page=page_num + 1)
+                     for image in images:
+                         image.save('temp_image.png', 'PNG')
+                         text += extract_text_from_image('temp_image.png', lang=lang)
+                         text += f"\n[OCR applied on page {page_num + 1}]\n"
+     except Exception as e:
+         return f"Error processing PDF: {str(e)}"
+     return text
+
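+ # Dispatch on file extension: PDFs take the hybrid text/OCR path above,
+ # images go straight to Tesseract.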
+ def extract_text(file_path, lang='eng'):
+     file_ext = file_path.lower().split('.')[-1]
+     if file_ext == 'pdf':
+         return extract_text_from_pdf(file_path, lang)
+     elif file_ext in ['png', 'jpg', 'jpeg']:
+         return extract_text_from_image(file_path, lang)
+     else:
+         return f"Unsupported file type: {file_ext}"
+
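+ # Index every uploaded file into a single in-memory VectorStoreIndex.
+ # Per-file extraction errors are collected and reported rather than
+ # aborting the whole batch; the resulting index is stored in the module
+ # global so the query tab can reach it.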
+ def process_upload(api_key, files, lang):
+     global vector_index
+
+     if not api_key:
+         return "Please provide a valid OpenAI API Key."
+
+     if not files:
+         return "No files uploaded."
+
+     documents = []
+     error_messages = []
+     image_heavy_docs = []
+
+     for file_path in files:
+         try:
+             text = extract_text(file_path, lang)
+             # extract_text_from_pdf tags OCR'd pages with this marker
+             if "[OCR applied on page" in text:
+                 image_heavy_docs.append(os.path.basename(file_path))
+             documents.append(Document(text=text))
+         except Exception as e:
+             error_message = f"Error processing file {file_path}: {str(e)}"
+             logging.error(error_message)
+             error_messages.append(error_message)
+
+     if documents:
+         try:
+             embed_model = OpenAIEmbedding(model="text-embedding-3-large", api_key=api_key)
+             vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
+
+             success_message = f"Successfully indexed {len(documents)} files."
+             if image_heavy_docs:
+                 success_message += f"\nNote: The following documents required OCR and may need manual review: {', '.join(image_heavy_docs)}"
+             if error_messages:
+                 success_message += f"\nErrors: {'; '.join(error_messages)}"
+
+             return success_message
+         except Exception as e:
+             return f"Error creating index: {str(e)}"
+     else:
+         return f"No valid documents were indexed. Errors: {'; '.join(error_messages)}"
+
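+ # Cosine similarity between the generated response and a reference answer,
+ # rescaled from [-1, 1] to a 0-100 percentage.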
+ def calculate_similarity(response, ground_truth):
+     response_embedding = sentence_model.encode(response, convert_to_tensor=True)
+     truth_embedding = sentence_model.encode(ground_truth, convert_to_tensor=True)
+
+     # util.cos_sim handles normalization; .item() unwraps the 1x1 tensor
+     similarity = util.cos_sim(response_embedding, truth_embedding).item()
+     return (similarity + 1) / 2 * 100
+
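+ # Answer a query against the indexed documents. Each query is appended to
+ # query_log; the ground-truth field is a placeholder, so the similarity
+ # metric below is only meaningful once real reference answers are supplied.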
+ def query_app(query, model_name, use_similarity_check, openai_api_key):
+     global vector_index, query_log
+
+     if vector_index is None:
+         logging.error("No documents indexed yet. Please upload documents first.")
+         return "No documents indexed yet. Please upload documents first.", None
+
+     if not openai_api_key:
+         logging.error("No OpenAI API Key provided.")
+         return "Please provide a valid OpenAI API Key.", None
+
+     try:
+         llm = OpenAI(model=model_name, api_key=openai_api_key)
+     except Exception as e:
+         logging.error(f"Error initializing the OpenAI model: {e}")
+         return f"Error initializing the OpenAI model: {e}", None
+
+     response_synthesizer = get_response_synthesizer(llm=llm)
+     query_engine = vector_index.as_query_engine(llm=llm, response_synthesizer=response_synthesizer)
+
+     try:
+         response = query_engine.query(query)
+     except Exception as e:
+         logging.error(f"Error during query processing: {e}")
+         return f"Error during query processing: {e}", None
+
+     generated_response = response.response
+     query_log.append({
+         "query_id": str(len(query_log) + 1),
+         "query": query,
+         "gt_answer": "Placeholder ground truth answer",
+         "response": generated_response,
+         "retrieved_context": [{"text": node.text} for node in response.source_nodes]
+     })
+
+     metrics = {}
+
+     if use_similarity_check:
+         try:
+             logging.info("Similarity check is enabled. Calculating similarity.")
+             similarity = calculate_similarity(generated_response, "Placeholder ground truth answer")
+             metrics['similarity'] = similarity
+             logging.info(f"Similarity calculated: {similarity}")
+         except Exception as e:
+             logging.error(f"Error during similarity calculation: {e}")
+             metrics['error'] = f"Error during similarity calculation: {e}"
+
+     return generated_response, metrics if use_similarity_check else None
+
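+ # Thin wrapper around the openai_tts_tool helper: forward all TTS options
+ # and turn exceptions into a user-visible error message for the UI.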
+ def process_tts(api_key, input_text, model_name, voice_type, voice_speed, language, output_option, summary_length, additional_prompt):
+     try:
+         return generate_audio_and_text(api_key, input_text, model_name, voice_type, voice_speed, language, output_option, summary_length, additional_prompt)
+     except Exception as e:
+         logging.error(f"Error during TTS generation: {e}")
+         return f"Error during TTS generation: {e}", None
+
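+ # Gradio Blocks UI. Note that api_key_input (Upload tab) and model_dropdown
+ # (Ask tab) are defined once and reused as inputs by later tabs, which
+ # Blocks allows.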
+ def main():
+     with gr.Blocks(title="Document Processing and TTS App") as demo:
+         gr.Markdown("# 📄 Document Processing, Text & Audio Generation App")
+
+         # Upload documents and index them
+         with gr.Tab("📤 Upload Documents"):
+             api_key_input = gr.Textbox(label="Enter OpenAI API Key", placeholder="Paste your OpenAI API Key here")
+             file_upload = gr.File(label="Upload Files", file_count="multiple", type="filepath")
+             lang_dropdown = gr.Dropdown(choices=langs, label="Select OCR Language", value='eng')
+             upload_button = gr.Button("Upload and Index")
+             upload_status = gr.Textbox(label="Status", interactive=False)
+             upload_button.click(fn=process_upload, inputs=[api_key_input, file_upload, lang_dropdown], outputs=[upload_status])
+
+         # Ask questions against the indexed documents
+         with gr.Tab("❓ Ask a Question"):
+             query_input = gr.Textbox(label="Enter your question")
+             model_dropdown = gr.Dropdown(choices=["gpt-4o", "gpt-4o-mini"], label="Select Model", value="gpt-4o")
+             similarity_checkbox = gr.Checkbox(label="Use Similarity Check", value=False)
+             query_button = gr.Button("Ask")
+             answer_output = gr.Textbox(label="Answer", interactive=False)
+             metrics_output = gr.JSON(label="Metrics")
+             query_button.click(fn=query_app, inputs=[query_input, model_dropdown, similarity_checkbox, api_key_input], outputs=[answer_output, metrics_output])
+
+         # Text-to-speech generation
+         with gr.Tab("🗣️ Generate Audio and Text"):
+             text_input = gr.Textbox(label="Enter text for generation")
+             voice_type = gr.Dropdown(choices=["alloy", "echo", "fable", "onyx"], label="Voice Type", value="alloy")
+             voice_speed = gr.Dropdown(choices=["normal", "slow", "fast"], label="Voice Speed", value="normal")
+             language = gr.Dropdown(choices=["en", "ar", "de", "hi"], label="Language", value="en")
+             output_option = gr.Radio(choices=["audio", "summary_text", "both"], label="Output Option", value="both")
+             summary_length = gr.Number(label="Summary Length", value=100)
+             additional_prompt = gr.Textbox(label="Additional Prompt (Optional)")
+             generate_button = gr.Button("Generate")
+             audio_output = gr.Audio(label="Generated Audio", interactive=False)
+             summary_output = gr.Textbox(label="Generated Summary Text", interactive=False)
+
+             generate_button.click(fn=process_tts, inputs=[api_key_input, text_input, model_dropdown, voice_type, voice_speed, language, output_option, summary_length, additional_prompt], outputs=[audio_output, summary_output])
+
+     demo.launch()
+
+ if __name__ == "__main__":
+     main()