KarthickAdopleAI committed on
Commit
b741ac4
1 Parent(s): 2d52a11

Update app.py

Files changed (1)
  1. app.py +6 -21
app.py CHANGED
@@ -9,12 +9,12 @@ import nltk
 from gtts import gTTS
 from sklearn.feature_extraction.text import TfidfVectorizer
 from langchain import HuggingFaceHub, PromptTemplate, LLMChain
-from huggingsound import SpeechRecognitionModel
 import gradio as gr
 from pytube import YouTube
 import requests
 import logging
 import os
+from huggingsound import SpeechRecognitionModel
 nltk.download('punkt')
 nltk.download('stopwords')
 
@@ -83,7 +83,6 @@ class VideoAnalytics:
     def generate_video_summary(self) -> str:
         """
         Generate a summary of the transcribed video.
-
         Returns:
             str: Generated summary.
         """
@@ -116,7 +115,6 @@ class VideoAnalytics:
     def generate_topics(self) -> str:
         """
         Generate topics from the transcribed video.
-
         Returns:
             str: Generated topics.
         """
@@ -147,7 +145,6 @@ class VideoAnalytics:
     def translation(self) -> str:
         """
         translation from the transcribed video.
-
         Returns:
             str: translation.
         """
@@ -176,11 +173,9 @@ class VideoAnalytics:
     def format_prompt(self, question: str, data: str) -> str:
         """
         Formats the prompt for the language model.
-
         Args:
             question (str): The user's question.
             data (str): The data to be analyzed.
-
         Returns:
             str: Formatted prompt.
         """
@@ -197,7 +192,6 @@ class VideoAnalytics:
                        repetition_penalty=1.0) -> str:
         """
         Generates text based on the prompt and transcribed text.
-
         Args:
             prompt (str): The prompt for generating text.
             transcribed_text (str): The transcribed text for analysis.
@@ -205,7 +199,6 @@ class VideoAnalytics:
             max_new_tokens (int): Maximum number of tokens to generate. Default is 5000.
             top_p (float): Nucleus sampling parameter. Default is 0.95.
             repetition_penalty (float): Penalty for repeating the same token. Default is 1.0.
-
         Returns:
             str: Generated text.
         """
@@ -241,11 +234,9 @@ class VideoAnalytics:
     def video_qa(self, question: str, model: str) -> str:
         """
         Performs video question answering.
-
         Args:
             question (str): The question asked by the user.
             model (str): The language model to be used ("OpenAI" or "Mixtral").
-
         Returns:
             str: Answer to the user's question.
         """
@@ -273,7 +264,6 @@ class VideoAnalytics:
     def extract_video_important_sentence(self) -> str:
         """
         Extract important sentences from the transcribed video.
-
         Returns:
             str: Extracted important sentences.
         """
@@ -315,7 +305,6 @@ class VideoAnalytics:
     def write_text_files(self, text: str, filename: str) -> None:
         """
         Write text to a file.
-
         Args:
             text (str): Text to be written to the file.
             filename (str): Name of the file.
@@ -331,10 +320,8 @@ class VideoAnalytics:
     def Download(self, link: str) -> str:
         """
         Download a video from YouTube.
-
         Args:
             link (str): YouTube video link.
-
         Returns:
             str: Path to the downloaded video file.
         """
@@ -366,11 +353,9 @@ class VideoAnalytics:
     def main(self, video: str = None, input_path: str = None) -> tuple:
         """
         Perform video analytics.
-
         Args:
             video (str): Path to the video file.
             input_path (str): Input path for the video.
-
         Returns:
             tuple: Summary, important sentences, and topics.
         """
@@ -381,19 +366,19 @@ class VideoAnalytics:
             video_ = VideoFileClip(input_path)
             duration = video_.duration
             video_.close()
-            if round(duration) <= 36000:
+            if round(duration) <= 6*600:
                 text = self.transcribe_video(input_path)
             else:
-                return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video","",""
+                return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video","","",None,None,None
         elif video:
             video_ = VideoFileClip(video)
             duration = video_.duration
             video_.close()
-            if round(duration) <= 36000:
+            if round(duration) <= 6*600:
                 text = self.transcribe_video(video)
                 input_path = video
             else:
-                return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video","",""
+                return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video","","",None,None,None
         # Generate summary, important sentences, and topics
         summary = self.generate_video_summary()
         self.write_text_files(summary,"Summary")
@@ -454,7 +439,7 @@ class VideoAnalytics:
         submit_btn.click(self.main,[video,yt_link],[summary,Important_Sentences,Topics,summary_audio,important_sentence_audio,topics_audio])
         question.submit(self.video_qa,[question,model],result)
         demo.launch()
-
+
 if __name__ == "__main__":
     video_analytics = VideoAnalytics()
     video_analytics.gradio_interface()
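Note on the duration guard changed in the -381,19 +366,19 hunk: 6*600 evaluates to 3600 seconds (60 minutes), while the returned message refers to a 10-minute limit (600 seconds), and the six returned values mirror the six Gradio outputs wired in submit_btn.click. The sketch below is a hypothetical refactor of that guard, not part of this commit; the names MAX_DURATION_SECONDS and within_duration_limit are invented for illustration, and it assumes moviepy's VideoFileClip as already used in main().

# Hypothetical helper (not in the commit): encode the 10-minute limit once so the
# check and the user-facing message cannot drift apart.
from moviepy.editor import VideoFileClip

MAX_DURATION_SECONDS = 10 * 60  # 10 minutes, matching the returned message

def within_duration_limit(path: str) -> bool:
    """Return True if the video at `path` is no longer than MAX_DURATION_SECONDS."""
    clip = VideoFileClip(path)
    try:
        return round(clip.duration) <= MAX_DURATION_SECONDS
    finally:
        clip.close()

# Usage inside main(): the six values correspond to the six Gradio outputs
# (summary, Important_Sentences, Topics, summary_audio, important_sentence_audio, topics_audio).
# if not within_duration_limit(input_path):
#     return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video", "", "", None, None, None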
 
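The first hunk only relocates the huggingsound import below the other imports; the model itself is used elsewhere in app.py and is not shown in this diff. For orientation, a minimal, self-contained sketch of the huggingsound SpeechRecognitionModel API follows; the checkpoint name and audio path are placeholders, not values taken from this repository, and this is not the app's transcribe_video implementation.

# Minimal huggingsound usage sketch (illustrative only).
from huggingsound import SpeechRecognitionModel

# Any CTC fine-tuned wav2vec2 checkpoint works; this English model is a common example.
model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-large-xlsr-53-english")

# transcribe() takes a list of audio file paths and returns one dict per file,
# each containing a "transcription" key among other fields.
transcriptions = model.transcribe(["sample_audio.wav"])
print(transcriptions[0]["transcription"])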