'''
COTR demo for a single image pair
'''
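# Example invocation (a sketch, not part of the original script; it assumes the
# pretrained COTR weights have been placed at <out_dir>/<model_id>/checkpoint.pth.tar,
# e.g. out/default/checkpoint.pth.tar):
#   python demo_single_pair.py --load_weights="default" --max_corrs=100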
import argparse
import os
import sys
import time

import cv2
import numpy as np
import torch
import imageio
import matplotlib.pyplot as plt
from COTR.utils import utils, debug_utils
from COTR.models import build_model
from COTR.options.options import *
from COTR.options.options_utils import *
from COTR.inference.inference_helper import triangulate_corr
from COTR.inference.sparse_engine import SparseEngine
# This script only runs inference: fix the random seed and disable autograd globally.
utils.fix_randomness(0)
torch.set_grad_enabled(False)

def main(opt):
    # Build COTR and load the pretrained weights.
    model = build_model(opt)
    model = model.cuda()
    weights = torch.load(opt.load_weights_path, map_location='cpu')['model_state_dict']
    utils.safe_load_weights(model, weights)
    model = model.eval()

    # Load the demo image pair.
    img_a = imageio.imread('./sample_data/imgs/cathedral_1.jpg', pilmode='RGB')
    img_b = imageio.imread('./sample_data/imgs/cathedral_2.jpg', pilmode='RGB')

    # Extract sparse correspondences over a multi-scale schedule with
    # cycle-consistency filtering.
    engine = SparseEngine(model, 32, mode='tile')
    t0 = time.time()
    corrs = engine.cotr_corr_multiscale_with_cycle_consistency(img_a, img_b, np.linspace(0.5, 0.0625, 4), 1, max_corrs=opt.max_corrs, queries_a=None)
    t1 = time.time()

    # Visualize the sparse matches, then densify them by triangulation and warp
    # image B onto image A to display a 50/50 blend of the alignment.
    utils.visualize_corrs(img_a, img_b, corrs)
    print(f'spent {t1 - t0} seconds for {opt.max_corrs} correspondences.')
    dense = triangulate_corr(corrs, img_a.shape, img_b.shape)
    warped = cv2.remap(img_b, dense[..., 0].astype(np.float32), dense[..., 1].astype(np.float32), interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    plt.imshow(warped / 255 * 0.5 + img_a / 255 * 0.5)
    plt.show()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    set_COTR_arguments(parser)
    parser.add_argument('--out_dir', type=str, default=general_config['out'], help='out directory')
    parser.add_argument('--load_weights', type=str, default=None, help='load a pretrained set of weights, you need to provide the model id')
    parser.add_argument('--max_corrs', type=int, default=100, help='number of correspondences')

    opt = parser.parse_args()
    opt.command = ' '.join(sys.argv)

    # Match the transformer feed-forward dimension to the channel count of the
    # selected backbone layer.
    layer_2_channels = {'layer1': 256,
                        'layer2': 512,
                        'layer3': 1024,
                        'layer4': 2048, }
    opt.dim_feedforward = layer_2_channels[opt.layer]
    if opt.load_weights:
        opt.load_weights_path = os.path.join(opt.out_dir, opt.load_weights, 'checkpoint.pth.tar')
    print_opt(opt)
    main(opt)