Commit c7a0722 by Vincentqyw
Parent(s): f448045
add app queue
app.py CHANGED
@@ -278,7 +278,7 @@ def run(config):
         matcher_info,
     ]
     button_reset.click(fn=ui_reset_state, inputs=inputs, outputs=reset_outputs)
-
+    app.queue()
     app.launch(share=False)


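The app.py change turns on Gradio's request queue before launching. A minimal sketch of the pattern, assuming a plain gr.Blocks app; the button and callback below are placeholders, not the real UI:

import gradio as gr

with gr.Blocks() as app:
    button = gr.Button("Run")
    output = gr.Textbox()
    # Placeholder callback; the real app wires ui_reset_state and the matcher UI.
    button.click(fn=lambda: "done", inputs=None, outputs=output)

# Enable the request queue before launch so long-running callbacks are
# queued and processed instead of hitting the default request timeout.
app.queue()
app.launch(share=False)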
hloc/extractors/dedode.py CHANGED
@@ -64,8 +64,8 @@ class DeDoDe(BaseModel):
         # load the model
         weights_detector = torch.load(model_detector_path, map_location="cpu")
         weights_descriptor = torch.load(model_descriptor_path, map_location="cpu")
-        self.detector = dedode_detector_L(weights=weights_detector, device
-        self.descriptor = dedode_descriptor_B(weights=weights_descriptor, device
+        self.detector = dedode_detector_L(weights=weights_detector, device=device)
+        self.descriptor = dedode_descriptor_B(weights=weights_descriptor, device=device)

         logger.info(f"Load DeDoDe model done.")
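The dedode.py hunk makes both constructor calls pass an explicit device. A minimal sketch of the resulting loading pattern, assuming the bundled DeDoDe builders accept a device keyword as shown in the diff; the checkpoint paths are placeholders:

import torch
from DeDoDe import dedode_detector_L, dedode_descriptor_B  # third_party/DeDoDe

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load checkpoints on CPU first, then let the builders place the models.
weights_detector = torch.load("dedode_detector_L.pth", map_location="cpu")      # placeholder path
weights_descriptor = torch.load("dedode_descriptor_B.pth", map_location="cpu")  # placeholder path

detector = dedode_detector_L(weights=weights_detector, device=device)
descriptor = dedode_descriptor_B(weights=weights_descriptor, device=device)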
third_party/ASpanFormer/src/ASpanFormer/aspan_module/attention.py CHANGED
@@ -6,6 +6,7 @@ from torch.nn import functional as F

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+
 class layernorm2d(nn.Module):
     def __init__(self, dim):
         super().__init__()
@@ -177,7 +178,8 @@ class HierachicalAttention(Module):
         offset_sample = self.sample_offset[None, None] * span_scale
         sample_pixel = offset[:, :, None] + offset_sample  # B*G*r^2*2
         sample_norm = (
-            sample_pixel / torch.tensor([wk / 2, hk / 2]).to(device)[None, None, None]
+            sample_pixel / torch.tensor([wk / 2, hk / 2]).to(device)[None, None, None]
+            - 1
         )

         q = (
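For context on the second hunk: dividing pixel coordinates by [wk / 2, hk / 2] and subtracting 1 maps them into [-1, 1], the grid convention that F.grid_sample expects. A standalone sketch of that normalization, assuming sample_norm is consumed by grid_sample; the shapes and values below are illustrative only:

import torch
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

hk, wk = 60, 80                                   # feature-map height/width (example values)
feat = torch.randn(1, 16, hk, wk, device=device)  # B*C*H*W feature map

# Pixel coordinates (x, y) to sample, shape B*N*2.
sample_pixel = torch.tensor([[[0.0, 0.0], [wk - 1.0, hk - 1.0]]], device=device)

# Map pixel coordinates in [0, w) x [0, h) to the [-1, 1] range used by grid_sample.
sample_norm = sample_pixel / torch.tensor([wk / 2, hk / 2], device=device) - 1

# Sample the feature map at the normalized locations (output is B*C*N*1).
sampled = F.grid_sample(
    feat, sample_norm[:, :, None], mode="bilinear", align_corners=False
)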
third_party/DeDoDe/DeDoDe/utils.py CHANGED
@@ -13,6 +13,7 @@ from time import perf_counter

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+
 def recover_pose(E, kpts0, kpts1, K0, K1, mask):
     best_num_inliers = 0
     K0inv = np.linalg.inv(K0[:2, :2])
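For context on the surrounding code: recover_pose decomposes an essential matrix into the relative rotation and translation consistent with the matched keypoints. A hedged sketch of how this is commonly done with OpenCV after normalizing the keypoints by the intrinsics; this illustrates the idea only and is not the body of the function, which the diff does not show:

import cv2
import numpy as np

def recover_pose_cv2(E, kpts0, kpts1, K0, K1, mask):
    # Normalize pixel coordinates with the intrinsics (assuming zero skew)
    # so cv2.recoverPose can be called with an identity camera matrix.
    kpts0_n = (kpts0 - K0[:2, 2][None]) / np.diag(K0)[:2][None]
    kpts1_n = (kpts1 - K1[:2, 2][None]) / np.diag(K1)[:2][None]
    n_inliers, R, t, _ = cv2.recoverPose(
        E, kpts0_n, kpts1_n, np.eye(3), mask=mask.astype(np.uint8)
    )
    return R, t.squeeze(), n_inliers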
third_party/SGMNet/sgmnet/match_model.py CHANGED
@@ -5,6 +5,7 @@ eps = 1e-8

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+
 def sinkhorn(M, r, c, iteration):
     p = torch.softmax(M, dim=-1)
     u = torch.ones_like(r)
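The context lines show the start of SGMNet's sinkhorn routine, which rescales the softmax-normalized score matrix so its row and column sums approach the prescribed marginals r and c. A hedged sketch of such an alternating-normalization loop, filling in the part of the function the diff does not show under that assumption; eps is the module-level constant from the hunk header:

import torch

eps = 1e-8

def sinkhorn(M, r, c, iteration):
    # M: B*N*M score matrix, r: B*N row marginals, c: B*M column marginals.
    p = torch.softmax(M, dim=-1)
    u = torch.ones_like(r)
    v = torch.ones_like(c)
    for _ in range(iteration):
        # Alternate row and column rescaling toward the target marginals.
        u = r / ((p * v.unsqueeze(-2)).sum(-1) + eps)
        v = c / ((p * u.unsqueeze(-1)).sum(-2) + eps)
    p = p * u.unsqueeze(-1) * v.unsqueeze(-2)
    return p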