Some OpenCV tutorials are written against old API specifications and no longer run as-is, so I'm posting code that works with the current version (on Google Colab) for anyone who just wants to get it running for now.
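The sample images box.png and box_in_scene.png are the ones shipped with OpenCV (in samples/data). If you are on Colab and don't have them locally, a minimal fetch sketch looks like this; the GitHub branch and path below are assumptions, so adjust them if the files have moved.

import urllib.request
# Assumed location of the sample images in the opencv/opencv repository (4.x branch)
base = 'https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/'
for name in ['box.png', 'box_in_scene.png']:
    urllib.request.urlretrieve(base + name, name)

With the images in the working directory, the updated tutorial code is as follows.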
import numpy as np
import cv2
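# cv2_imshow replaces cv2.imshow, which cannot display images inside a Colab notebook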
from google.colab.patches import cv2_imshow
# Load the query image (box.png) and the scene image (box_in_scene.png) in grayscale
img1 = cv2.imread('box.png', 0)           # queryImage
img2 = cv2.imread('box_in_scene.png', 0)  # trainImage
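# Detect keypoints and compute binary descriptors with AKAZE in both images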
akaze = cv2.AKAZE_create()
kp1, des1 = akaze.detectAndCompute(img1, None)
kp2, des2 = akaze.detectAndCompute(img2, None)
# create BFMatcher object
# AKAZE descriptors are binary, so Hamming distance is the appropriate norm.
# crossCheck must stay False because knnMatch with k=2 is used for the ratio test below.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
# For each descriptor in img1, find its two nearest neighbours in img2
matches = bf.knnMatch(des1, des2, k=2)
# Mask of matches (overwritten below with the RANSAC inlier/outlier flags)
matchesMask = [[0,0] for i in range(len(matches))]
# Apply Lowe's ratio test to keep only distinctive matches
good = []   # for drawing with drawMatchesKnn
good2 = []  # for the homography estimation below
for m,n in matches:
    if m.distance < 0.75*n.distance:
        good.append([m])
        good2.append(m)
# cv2.drawMatchesKnn expects list of lists as matches.
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2)
cv2_imshow(img3)
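# Use the good matches to estimate a homography and locate box.png inside box_in_scene.png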
MIN_MATCH_COUNT = 10
if len(good)>MIN_MATCH_COUNT:
    # Pixel coordinates of the matched keypoints in each image
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good2 ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good2 ]).reshape(-1,1,2)
    # Estimate the perspective transform with RANSAC (5.0 px reprojection threshold)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()  # 1 for RANSAC inliers, 0 for outliers
    # Project the corners of the query image into the scene
    h,w = img1.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)
    # Draw the projected outline (note: polylines modifies img2 in place)
    img3 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
    cv2_imshow(img3)
    # img2 was modified in place above, so the matches are drawn over the detected outline
    img4 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2)
    cv2_imshow(img4)
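    # The original tutorial also reports when too few good matches are found;
    # without a branch like this the script silently shows nothing on failure.
else:
    print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
    matchesMask = None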