JohanDL committed
Commit 10b0245
1 Parent(s): bbd0fe5

Update app.py

Files changed (1)
  1. app.py +7 -2
app.py CHANGED
@@ -10,6 +10,12 @@ import tempfile
 from depth_anything.dpt import DepthAnything
 from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
 
+
+@spaces.GPU
+@torch.no_grad()
+def predict_depth(model, image):
+    return model(image)
+
 def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
     # Define path for temporary processed frames
     temp_frame_dir = tempfile.mkdtemp()
@@ -76,8 +82,7 @@ def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
         frame = transform({'image': frame})['image']
         frame = torch.from_numpy(frame).unsqueeze(0).to(DEVICE)
 
-        with torch.no_grad():
-            depth = depth_anything(frame)
+        depth = predict_depth(depth_anything, frame)
 
         depth = F.interpolate(depth[None], (frame_height, frame_width), mode='bilinear', align_corners=False)[0, 0]
         depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
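
For context, the commit moves per-frame inference out of an inline torch.no_grad() block and into a top-level helper decorated with @spaces.GPU, the pattern Hugging Face ZeroGPU Spaces expect so that a GPU is only claimed while the decorated call runs. Below is a minimal sketch of that pattern, assuming the spaces package is available; the usage comment at the end is illustrative and not part of the commit itself.

import torch
import spaces  # Hugging Face Spaces package that provides the GPU decorator

@spaces.GPU        # GPU is allocated only for the duration of this call (ZeroGPU)
@torch.no_grad()   # inference only; skip autograd bookkeeping
def predict_depth(model, image):
    return model(image)

# Inside make_video(), each preprocessed frame tensor is then run through the model:
# depth = predict_depth(depth_anything, frame)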