import torchvision.transforms as tfm
from kornia.feature import LoFTR

from matching import BaseMatcher


class LoftrMatcher(BaseMatcher):
    def __init__(self, device="cpu", *args, **kwargs):
        super().__init__(device, **kwargs)
        # Load LoFTR with the outdoor-pretrained weights and move it to the target device.
        self.model = LoFTR(pretrained="outdoor").to(self.device)

    def preprocess(self, img):
        # LoFTR expects a grayscale batch of shape (B, 1, H, W).
        return tfm.Grayscale()(img).unsqueeze(0).to(self.device)

    def _forward(self, img0, img1):
        img0 = self.preprocess(img0)
        img1 = self.preprocess(img1)

        batch = {"image0": img0, "image1": img1}
        output = self.model(batch)
        mkpts0, mkpts1 = output["keypoints0"], output["keypoints1"]

        # LoFTR is detector-free, so no standalone keypoints or descriptors are returned.
        return mkpts0, mkpts1, None, None, None, None
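
# Minimal usage sketch (an assumption, not part of this file): img0 and img1 are
# expected to be (3, H, W) float image tensors loaded elsewhere in the project.
#
#     matcher = LoftrMatcher(device="cuda")
#     mkpts0, mkpts1, *_ = matcher._forward(img0, img1)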