JohanDL committed
Commit 5e79f53
Parent: 7962bc6

Update app.py

Files changed (1): app.py (+8, -7)
app.py CHANGED
@@ -17,7 +17,9 @@ from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
 def predict_depth(model, image):
     return model(image)
 
-def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
+def make_video(video_path, outdir='./vis_video_depth',encoder='vits'):
+    # DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
+    # model = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(DEVICE).eval()
     # Define path for temporary processed frames
     temp_frame_dir = tempfile.mkdtemp()
 
@@ -115,8 +117,7 @@ css = """
     max-height: 80vh;
     }
 """
-DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
-model = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(DEVICE).eval()
+
 
 title = "# Depth Anything Video Demo"
 description = """Depth Anything on full video files.
@@ -137,9 +138,9 @@ transform = Compose([
     PrepareForNet(),
 ])
 
-@torch.no_grad()
-def predict_depth(model, image):
-    return model(image)
+# @torch.no_grad()
+# def predict_depth(model, image):
+#     return model(image)
 
 with gr.Blocks(css=css) as demo:
     gr.Markdown(title)
@@ -163,7 +164,7 @@ with gr.Blocks(css=css) as demo:
     example_files = os.listdir('assets/examples_video')
     example_files.sort()
     example_files = [os.path.join('assets/examples_video', filename) for filename in example_files]
-    examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=processed_video, fn=on_submit, cache_examples=False)
+    examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=processed_video, fn=on_submit, cache_examples=True)
 
 
 if __name__ == '__main__':
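
With the module-level DEVICE/model construction removed and the in-function equivalents still commented out, make_video is presumably expected to build the model itself from its encoder argument (now defaulting to 'vits'). Below is a minimal sketch of how that could look; the load_depth_model helper, the import path, and the 'LiheYoung/depth_anything_{encoder}14' naming pattern are assumptions generalised from the vitl14 checkpoint id visible in the removed lines, not code from this commit.

# Hypothetical helper, not part of this commit: builds a Depth Anything model
# for the encoder selected in make_video (e.g. 'vits' or 'vitl').
import torch
from depth_anything.dpt import DepthAnything  # import path assumed from the repo layout

def load_depth_model(encoder='vits'):
    # Checkpoint id pattern assumed from 'LiheYoung/depth_anything_vitl14'
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = DepthAnything.from_pretrained(f'LiheYoung/depth_anything_{encoder}14')
    return model.to(device).eval()

Separately, switching cache_examples from False to True asks Gradio to run on_submit over the bundled example videos ahead of time and serve the cached depth videos, rather than reprocessing an example each time a visitor clicks it.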