summary refs log tree commit diff
path: root/scan.py
diff options
context:
space:
mode:
Diffstat (limited to 'scan.py')
-rw-r--r--  scan.py  87
1 file changed, 48 insertions(+), 39 deletions(-)
diff --git a/scan.py b/scan.py
index 792e852..75e14bb 100644
--- a/scan.py
+++ b/scan.py
@@ -1,56 +1,65 @@
-from __future__ import print_function
-
import cv2
+import imutils
import numpy as np
-MAX_MATCHES = 500
-GOOD_MATCH_PERCENT = 0.15
+
+def order_points(pts):
+ rect = np.zeros((4, 2), dtype="float32")
+ s = pts.sum(axis=1)
+ rect[0] = pts[np.argmin(s)]
+ rect[2] = pts[np.argmax(s)]
+ diff = np.diff(pts, axis=1)
+ rect[1] = pts[np.argmin(diff)]
+ rect[3] = pts[np.argmax(diff)]
+ return rect
-def align_images(im1, im2):
- im1_gra = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
- im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
+def four_point_transform(image, pts):
+ rect = order_points(pts)
+ (tl, tr, br, bl) = rect
+ widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
+ widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
+ maxWidth = max(int(widthA), int(widthB))
- orb = cv2.ORB_create(MAX_MATCHES)
- keypoints1, descriptors1 = orb.detectAndCompute(im1_gra, None)
- keypoints2, descriptors2 = orb.detectAndCompute(im2_gray, None)
+ heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
+ heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
+ maxHeight = max(int(heightA), int(heightB))
- matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
- matches = matcher.match(descriptors1, descriptors2, None)
+ dst = np.array([
+ [0, 0],
+ [maxWidth - 1, 0],
+ [maxWidth - 1, maxHeight - 1],
+ [0, maxHeight - 1]], dtype="float32")
- matches.sort(key=lambda x: x.distance, reverse=False)
+ M = cv2.getPerspectiveTransform(rect, dst)
+ warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
- num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
- matches = matches[:num_good_matches]
+ return warped
- im_matches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
- cv2.imwrite("matches.jpg", im_matches)
- points1 = np.zeros((len(matches), 2), dtype=np.float32)
- points2 = np.zeros((len(matches), 2), dtype=np.float32)
+image = cv2.imread("example.jpg")
+ratio = image.shape[0] / 500.0
+orig = image.copy()
- for i, match in enumerate(matches):
- points1[i, :] = keypoints1[match.queryIdx].pt
- points2[i, :] = keypoints2[match.trainIdx].pt
+gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+gray = cv2.GaussianBlur(gray, (5, 5), 0)
+edged = cv2.Canny(gray, 75, 200)
- h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
+cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+cnts = imutils.grab_contours(cnts)
+cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
- height, width, channels = im2.shape
- im1_reg = cv2.warpPerspective(im1, h, (width, height))
+for c in cnts:
+ peri = cv2.arcLength(c, True)
+ approx = cv2.approxPolyDP(c, 0.02 * peri, True)
+ if len(approx) == 4:
+ screenCnt = approx
+ break
- return im1_reg, h
+cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
+warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
-if __name__ == '__main__':
- ref_filename = "mask.jpg"
- print("Reading reference image : ", ref_filename)
- im_reference = cv2.imread(ref_filename, cv2.IMREAD_COLOR)
- im_filename = "example.jpg"
- print("Reading image to align : ", im_filename)
- im = cv2.imread(im_filename, cv2.IMREAD_COLOR)
- print("Aligning images ...")
- im_req, h = align_images(im, im_reference)
- out_filename = "aligned.jpg"
- print("Saving aligned image : ", out_filename)
- cv2.imwrite(out_filename, im_req)
- print("Estimated homography : \n", h)
+cv2.imshow("Original", imutils.resize(orig, height=650))
+cv2.imshow("Scanned", imutils.resize(warped, height=650))
+cv2.waitKey(0)