capradeepgujaran committed
Commit 5b0dfba
1 Parent(s): 0052d38

Update app.py

app.py CHANGED
@@ -144,16 +144,28 @@ class VideoQAInterface:
         self.frame_index = None
         self.frame_data = None
         self.processed = False
+        self.current_video_path = None  # Store the video path
+        self.temp_dir = tempfile.mkdtemp()
+
+    def __del__(self):
+        """Cleanup temporary files"""
+        if hasattr(self, 'temp_dir') and os.path.exists(self.temp_dir):
+            shutil.rmtree(self.temp_dir, ignore_errors=True)

     def process_video(self, video_file, progress=gr.Progress()):
         """Process video with progress tracking"""
         try:
             if video_file is None:
                 return "Please upload a video first."
-
+
+            # Save uploaded video to temp directory
+            temp_video_path = os.path.join(self.temp_dir, "input_video.mp4")
+            shutil.copy2(video_file.name, temp_video_path)
+            self.current_video_path = temp_video_path
+
             progress(0, desc="Starting video processing...")
             self.frame_index, self.frame_data, message = self.processor.process_video(
-
+                self.current_video_path, progress
             )

             if self.frame_index is not None:
@@ -170,7 +182,7 @@ class VideoQAInterface:
     @torch.no_grad()
     def answer_question(self, query):
         """Answer questions about the video"""
-        if not self.processed:
+        if not self.processed or self.current_video_path is None:
             return None, "Please process a video first."

         try:
@@ -192,22 +204,27 @@
             descriptions = []
             frames = []

-
-
-
-
-
-
-
-                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                frames.append(Image.fromarray(frame_rgb))
+            # Use cv2.VideoCapture to read frames
+            cap = cv2.VideoCapture(self.current_video_path)
+            try:
+                for result in results:
+                    frame_number = result['frame_number']
+                    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
+                    ret, frame = cap.read()

-
-
-
-
+                    if ret:
+                        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                        frames.append(Image.fromarray(frame_rgb))
+
+                        desc = f"Timestamp: {result['timestamp']:.2f}s\n"
+                        desc += f"Scene Description: {result['caption']}\n"
+                        desc += f"Relevance Score: {result['relevance']:.2f}"
+                        descriptions.append(desc)
+            finally:
+                cap.release()  # Ensure video capture is released

-
+            if not frames:
+                return None, "No relevant frames found."

             combined_desc = "\n\nFrame Analysis:\n\n"
             for i, desc in enumerate(descriptions, 1):
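Note: the snippet below is a minimal, self-contained sketch of the temp-file handling this commit introduces (mkdtemp in the constructor, copying the upload into that directory, best-effort removal in __del__). The UploadStore class and save_upload name are illustrative only and are not part of app.py.

import os
import shutil
import tempfile

class UploadStore:
    """Illustrative only: keep one working copy of an uploaded file in a private temp dir."""

    def __init__(self):
        self.temp_dir = tempfile.mkdtemp()  # unique directory; the owner is responsible for removing it
        self.current_path = None

    def save_upload(self, src_path, name="input_video.mp4"):
        # Copy the upload so later reads no longer depend on the framework's own temp file.
        dst = os.path.join(self.temp_dir, name)
        shutil.copy2(src_path, dst)
        self.current_path = dst
        return dst

    def __del__(self):
        # Best-effort cleanup; __del__ timing is not guaranteed, so errors are ignored.
        if hasattr(self, "temp_dir") and os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir, ignore_errors=True)

Relying on __del__ keeps the diff small, but tempfile.TemporaryDirectory or an explicit cleanup method would make the removal deterministic.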