Dy committed on
Commit 7017f26
1 Parent(s): 069859e

Update app.py

Files changed (1)
  1. app.py +6 -57
app.py CHANGED
@@ -1,53 +1,3 @@
- OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
- ELEVEN_LABS_API = os.environ['ELEVEN_LABS_API']
- PASSWORD_AUTH = os.environ['PASSWORD_AUTH']
-
- from elevenlabs import clone, generate, play
- from elevenlabs import set_api_key
- set_api_key(ELEVEN_LABS_API)
-
- def process_video_custom_voice(uploaded_file, prompt_user, prompt_input, custom_audio, voice_prompt):
-
-     if type(uploaded_file) == str:
-         video_filename = uploaded_file
-     else:
-         video_filename = uploaded_file.name
-     print("video", video_filename)
-
-     base64Frames, video_filename, video_duration = video_to_frames(video_filename)
-
-     final_prompt = prompt_type(prompt_user, prompt_input, video_duration)
-     print(final_prompt)
-     text = frames_to_story(base64Frames, final_prompt, video_duration)
-
-     if type(custom_audio) == str:
-         custom_audio_filename = custom_audio
-     else:
-         custom_audio_filename = custom_audio.name
-     print("custom audio", custom_audio_filename)
-
-     voice = clone(
-         name="Custom Voice",
-         description=f"{voice_prompt}",  # Optional
-         files=[custom_audio_filename],
-     )
-
-     audio = generate(text=text, voice=voice)
-
-     audio_filename = custom_audio_filename
-
-     # Merge audio and video
-     output_video_filename = os.path.splitext(video_filename)[0] + '_output.mp4'
-     final_video_filename = merge_audio_video(video_filename, audio_filename, output_video_filename)
-     print("final", final_video_filename)
-
-     if type(uploaded_file) != str:
-         os.unlink(video_filename)
-     os.unlink(audio_filename)
-
-     return final_video_filename, text
-
-
  import openai
  import requests
  import os
@@ -61,6 +11,10 @@ import tempfile
  import numpy as np
  import gradio as gr

+ OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
+ ELEVEN_LABS_API = os.environ['ELEVEN_LABS_API']
+ PASSWORD_AUTH = os.environ['PASSWORD_AUTH']
+
  # Set your OpenAI API key here
  openai.api_key = OPENAI_API_KEY

@@ -392,11 +346,7 @@ with gr.Blocks() as demo:
          prompt_user = gr.Textbox(label="Enter your prompt")
          prompt_input = gr.Dropdown(['how-to', 'documentary', 'sports-commentator', 'custom-prompt'], label="Choose Your Narration")
          voice_type = gr.Dropdown(['masculine-american', 'masculine-british', 'feminine-american', 'feminine-british'], label="Choose Your Voice")
-
          generate_btn = gr.Button(value="Generate")
-         voice_sample = gr.File(label="Use custom made voice.")
-         voice_prompt = gr.Textbox(label="Enter voice prompt.")
-
          #render_btn = gr.Button(value="Render")
          #print_btn = gr.Button(value="Print")
      with gr.Column():
@@ -404,13 +354,12 @@ with gr.Blocks() as demo:
          output_file = gr.Video(label="Ouput video file.")
          output_voiceover = gr.Textbox(label="Generated Text")
          regenerate_btn = gr.Button(value="Re-generate")
-         custom_voice_btn = gr.Button(value="Use Custom Voice")
          #print_text = gr.Text(label="Printing")


      generate_btn.click(process_video, inputs=[video_input, prompt_user, prompt_input, voice_type], outputs=[output_file,output_voiceover])
      regenerate_btn.click(regenerate, inputs=[video_input, output_voiceover, voice_type], outputs=[output_file,output_voiceover])
-     custom_voice_btn.click(process_video_custom_voice, inputs=[video_input, prompt_user, prompt_input, voice_sample, voice_prompt], outputs=[output_file,output_voiceover])
+


- demo.launch()
+ demo.launch(auth=("admin", PASSWORD_AUTH))
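The new launch call relies on Gradio's built-in authentication: launch(auth=...) accepts a (username, password) tuple, a list of such tuples, or a callable (username, password) -> bool, and shows a login form before the app loads. A minimal sketch of that pattern, assuming only the "admin" username and the PASSWORD_AUTH environment variable from this commit; the Blocks content is an illustrative placeholder:

import os
import gradio as gr

# Shared password pulled from the environment, matching the commit's
# os.environ['PASSWORD_AUTH'] style (raises KeyError if the variable is unset).
PASSWORD_AUTH = os.environ['PASSWORD_AUTH']

with gr.Blocks() as demo:
    gr.Markdown("Authenticated demo")  # placeholder UI for the sketch

# auth gates the whole UI behind a login prompt; here a single fixed credential.
demo.launch(auth=("admin", PASSWORD_AUTH))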