-rw-r--r-- | mask.jpg    | bin | 0 -> 13591 bytes
-rw-r--r-- | mask.png    | bin | 9167 -> 0 bytes
-rw-r--r-- | matches.jpg | bin | 0 -> 189459 bytes
-rw-r--r-- | scan.py     | 87
4 files changed, 39 insertions, 48 deletions
diff --git a/mask.jpg b/mask.jpg
new file mode 100644
index 0000000..aafa555
--- /dev/null
+++ b/mask.jpg
Binary files differ
diff --git a/mask.png b/mask.png
deleted file mode 100644
index 2d97ca4..0000000
--- a/mask.png
+++ /dev/null
Binary files differ
diff --git a/matches.jpg b/matches.jpg
new file mode 100644
index 0000000..d8ef11a
--- /dev/null
+++ b/matches.jpg
Binary files differ
diff --git a/scan.py b/scan.py
--- a/scan.py
+++ b/scan.py
@@ -1,65 +1,56 @@
+from __future__ import print_function
+
 import cv2
-import imutils
 import numpy as np

-
-def order_points(pts):
-    rect = np.zeros((4, 2), dtype="float32")
-    s = pts.sum(axis=1)
-    rect[0] = pts[np.argmin(s)]
-    rect[2] = pts[np.argmax(s)]
-    diff = np.diff(pts, axis=1)
-    rect[1] = pts[np.argmin(diff)]
-    rect[3] = pts[np.argmax(diff)]
-    return rect
+MAX_MATCHES = 500
+GOOD_MATCH_PERCENT = 0.15


-def four_point_transform(image, pts):
-    rect = order_points(pts)
-    (tl, tr, br, bl) = rect
-    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
-    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
-    maxWidth = max(int(widthA), int(widthB))
+def align_images(im1, im2):
+    im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
+    im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

-    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
-    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
-    maxHeight = max(int(heightA), int(heightB))
+    orb = cv2.ORB_create(MAX_MATCHES)
+    keypoints1, descriptors1 = orb.detectAndCompute(im1_gray, None)
+    keypoints2, descriptors2 = orb.detectAndCompute(im2_gray, None)

-    dst = np.array([
-        [0, 0],
-        [maxWidth - 1, 0],
-        [maxWidth - 1, maxHeight - 1],
-        [0, maxHeight - 1]], dtype="float32")
+    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
+    matches = matcher.match(descriptors1, descriptors2, None)

-    M = cv2.getPerspectiveTransform(rect, dst)
-    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
+    matches.sort(key=lambda x: x.distance, reverse=False)

-    return warped
+    num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
+    matches = matches[:num_good_matches]
+    im_matches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
+    cv2.imwrite("matches.jpg", im_matches)

-image = cv2.imread("example.jpg")
-ratio = image.shape[0] / 500.0
-orig = image.copy()
+    points1 = np.zeros((len(matches), 2), dtype=np.float32)
+    points2 = np.zeros((len(matches), 2), dtype=np.float32)

-gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-gray = cv2.GaussianBlur(gray, (5, 5), 0)
-edged = cv2.Canny(gray, 75, 200)
+    for i, match in enumerate(matches):
+        points1[i, :] = keypoints1[match.queryIdx].pt
+        points2[i, :] = keypoints2[match.trainIdx].pt

-cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
-cnts = imutils.grab_contours(cnts)
-cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
+    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

-for c in cnts:
-    peri = cv2.arcLength(c, True)
-    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
-    if len(approx) == 4:
-        screenCnt = approx
-        break
+    height, width, channels = im2.shape
+    im1_reg = cv2.warpPerspective(im1, h, (width, height))

-cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
+    return im1_reg, h

-warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
-cv2.imshow("Original", imutils.resize(orig, height=650))
-cv2.imshow("Scanned", imutils.resize(warped, height=650))
-cv2.waitKey(0)
+if __name__ == '__main__':
+    ref_filename = "mask.jpg"
+    print("Reading reference image : ", ref_filename)
+    im_reference = cv2.imread(ref_filename, cv2.IMREAD_COLOR)
+    im_filename = "example.jpg"
+    print("Reading image to align : ", im_filename)
+    im = cv2.imread(im_filename, cv2.IMREAD_COLOR)
+    print("Aligning images ...")
+    im_reg, h = align_images(im, im_reference)
+    out_filename = "aligned.jpg"
+    print("Saving aligned image : ", out_filename)
+    cv2.imwrite(out_filename, im_reg)
+    print("Estimated homography : \n", h)
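
For reference, the rewritten scan.py replaces the contour-based four-point document scanner with ORB feature matching: keypoints and binary descriptors are extracted from both images, the best 15% of Hamming-distance matches are kept, a homography is estimated with RANSAC, and the input photo is warped into the reference frame. The sketch below shows one way the new align_images() helper might be driven from a separate script; the file names mirror those in the committed __main__ block, while the ghosting overlay and its overlay.jpg output are illustrative assumptions, not part of the commit.

import cv2

from scan import align_images  # ORB-based aligner introduced by this commit

# Reference form and the photo to align, as named in scan.py's __main__ block.
reference = cv2.imread("mask.jpg", cv2.IMREAD_COLOR)
photo = cv2.imread("example.jpg", cv2.IMREAD_COLOR)

# Warp the photo into the reference frame; h is the estimated 3x3 homography.
aligned, h = align_images(photo, reference)

# Illustrative check (not in the commit): blend the warped photo over the
# reference so residual misregistration shows up as ghosting.
overlay = cv2.addWeighted(reference, 0.5, aligned, 0.5, 0)
cv2.imwrite("overlay.jpg", overlay)
print("Estimated homography:\n", h)

Because align_images() warps its first argument onto the size of its second, the blend is well defined whenever both reads succeed; like the committed script, this sketch does not guard against cv2.imread returning None for a missing file.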