Kvikontent committed on
Commit
3141b27
β€’
1 Parent(s): eaf31ab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -17
app.py CHANGED
@@ -12,25 +12,27 @@ pipe.enable_model_cpu_offload()
12
 
13
  @spaces.GPU(duration=250)
14
  def generate(prompt, num_inference_steps=25):
15
- video_frames = pipe(prompt, num_inference_steps).frames
16
- resized_frames = []
17
- for frame in video_frames:
18
- height, width, _ = frame.shape
19
- new_height = (height // 8) * 8
20
- new_width = (width // 8) * 8
21
- resized_frame = cv2.resize(frame, (new_width, new_height))
22
- resized_frames.append(resized_frame)
23
- video_path = export_to_video(np.array(resized_frames))
24
- return video_path
 
 
25
 
26
  prompt = gr.Textbox("Enter prompt to generate a video")
27
  num_inference_steps = gr.Slider(10, 50, value=25)
28
 
29
  interface = gr.Interface(
30
- generate,
31
- inputs=[prompt, num_inference_steps],
32
- examples=[["Astronaut riding a horse", 25], ["Darth vader surfing in waves", 20]],
33
- outputs="video",
34
- cache_examples=False,
35
- theme="soft"
36
- ).launch()
 
12
 
13
@spaces.GPU(duration=250)
def generate(prompt, num_inference_steps=25):
    """Generate a video from a text prompt and return the exported file path.

    Args:
        prompt: Text description of the video to generate.
        num_inference_steps: Number of denoising steps (more = slower, higher quality).

    Returns:
        Path to the exported video file produced by export_to_video.
    """
    # Pass the step count by keyword: diffusers text-to-video pipelines take
    # height/width as the next positional parameters, so a positional call
    # would silently bind 25 to the wrong argument.
    video_frames = pipe(prompt, num_inference_steps=num_inference_steps).frames

    # Allowed output resolutions (all multiples of 8, which the video
    # encoder requires).
    allowed_resolutions = [16, 24, 32, 40, 48, 56, 64, 128, 256, 512]

    resized_frames = []
    for frame in video_frames:
        height, width, _ = frame.shape
        # Largest allowed resolution not exceeding the original. Use <=
        # (not <) so a frame already at an allowed size is kept, not shrunk
        # a step further; fall back to the smallest allowed size for frames
        # below 16 px instead of raising ValueError on an empty sequence.
        fitting_heights = [res for res in allowed_resolutions if res <= height]
        fitting_widths = [res for res in allowed_resolutions if res <= width]
        new_height = max(fitting_heights) if fitting_heights else allowed_resolutions[0]
        new_width = max(fitting_widths) if fitting_widths else allowed_resolutions[0]
        # cv2.resize takes dsize as (width, height).
        resized_frames.append(cv2.resize(frame, (new_width, new_height)))

    video_path = export_to_video(np.array(resized_frames))
    return video_path
27
 
28
# Input widgets for the Gradio form.
prompt = gr.Textbox("Enter prompt to generate a video")
num_inference_steps = gr.Slider(10, 50, value=25)

# Example prompts shown beneath the form; not pre-computed (cache_examples=False).
example_prompts = [
    ["Astronaut riding a horse", 25],
    ["Darth vader surfing in waves", 20],
]

# Build the app and start serving immediately.
interface = gr.Interface(
    generate,
    inputs=[prompt, num_inference_steps],
    examples=example_prompts,
    outputs="video",
    cache_examples=False,
    theme="soft",
).launch()