Kvikontent committed on
Commit
d632df8
β€’
1 Parent(s): 3141b27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -13
app.py CHANGED
@@ -9,21 +9,13 @@ import numpy as np
9
  pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
10
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
11
  pipe.enable_model_cpu_offload()
 
12
 
13
  @spaces.GPU(duration=250)
14
- def generate(prompt, num_inference_steps=25):
15
- video_frames = pipe(prompt, num_inference_steps).frames
16
- resized_frames = []
17
- allowed_resolutions = [16, 24, 32, 40, 48, 56, 64, 128, 256, 512] # Define allowed resolutions (multiples of 8)
18
- for frame in video_frames:
19
- height, width, _ = frame.shape
20
- # Find the closest allowed resolution smaller than the original
21
- new_height = max(res for res in allowed_resolutions if res < height)
22
- new_width = max(res for res in allowed_resolutions if res < width)
23
- resized_frame = cv2.resize(frame, (new_width, new_height))
24
- resized_frames.append(resized_frame)
25
- video_path = export_to_video(np.array(resized_frames))
26
- return video_path
27
 
28
  prompt = gr.Textbox("Enter prompt to generate a video")
29
  num_inference_steps = gr.Slider(10, 50, value=25)
 
9
  pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
10
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
11
  pipe.enable_model_cpu_offload()
12
+ pipe.enable_vae_slicing()
13
 
14
@spaces.GPU(duration=250)
def generate(prompt, num_inference_steps=25, frames=200):
    """Generate a video from a text prompt using the text-to-video pipeline.

    Args:
        prompt: Text description of the video to generate.
        num_inference_steps: Number of denoising steps (higher = better
            quality, slower generation).
        frames: Number of video frames to generate.

    Returns:
        Filesystem path of the exported video file.
    """
    # Fix two defects in the original body:
    #   1. It referenced an undefined name `num_frames` (the parameter is
    #      `frames`), raising NameError on every call.
    #   2. It passed arguments positionally, so `num_inference_steps` bound
    #      to the pipeline's `height` parameter and the frame count to
    #      `width`. Keyword arguments route the values correctly.
    video_frames = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        num_frames=frames,
    ).frames
    video_path = export_to_video(video_frames)
    return video_path
 
 
 
 
 
 
 
 
 
19
 
20
  prompt = gr.Textbox("Enter prompt to generate a video")
21
  num_inference_steps = gr.Slider(10, 50, value=25)