Transform and display a cropped image in the same orientation and dimensions as a reference image using OpenCV Python


I have a reference image, and I am using ORB detection to find that reference image within a larger test image. Once it is found, I want to save the matched region as a new image, transformed to the same dimensions as the reference image and saved in the same orientation.

So far I have matched the reference image inside the larger image and masked it off. But I can't work out how to display only the cropped region as its own picture, in the correct orientation and dimensions. I want to keep what's inside the crop and throw away everything else.

Any help would be appreciated. Thank you.

import cv2
import numpy as np

# minimum ORB matches required to make a match
MIN_MATCH_COUNT = 10

img1 = cv2.imread("reference.jpg")
img2 = cv2.imread("1.jpg")

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2, None)

# filter the matches
good = []
for i, m in enumerate(matches):
    if i < len(matches) - 1 and m.distance < 0.7 * matches[i+1].distance:
        good.append(m)

if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w, d = img1.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts, M)
    mask = np.ones(img2.shape[:2], dtype="uint8") * 255
    rect = cv2.minAreaRect(dst)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    new = cv2.drawContours(mask, [box], -1, 0, -1)
    # remove the contours from the image and show the resulting images
    img = cv2.bitwise_and(img2, img2, mask=mask)
    cv2.imshow("Mask", mask)
    cv2.imshow("After", img)
else:
    print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
    matchesMask = None

# this draws the match lines between the reference and 1.jpg images
draw_params = dict(matchColor = (0,255,0),  # draw matches in green color
               singlePointColor = None,
               matchesMask = matchesMask,  # draw only inliers
               flags = 2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
cv2.imshow("Matches", img3)
cv2.waitKey()

You are on the right track and have most of the work done. Since you have already found the mask of the ROI, you can perform a perspective transform to get the correct orientation, and afterwards resize the image to match the reference/template image.

First we invert the mask so the desired ROI is white, then find contours on this mask. From there we use cv2.arcLength() and cv2.approxPolyDP() to obtain the corner points, and perform a perspective transform on those corners.

Now that we have the correct orientation, we simply resize to match the template image. Here's the result (left) next to the template image (right); the printed shapes confirm the transform and the resize:

template shape: (210, 236, 3)
transformed shape: (288, 279, 3)

template shape: (210, 236, 3)
resized shape: (210, 236, 3)

Code

import cv2
import numpy as np

def perspective_transform(image, corners):
    def order_corner_points(corners):
        # Separate corners into individual points
        # Index 0 - top-right
        #       1 - top-left
        #       2 - bottom-left
        #       3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Determine width of new image which is the max distance between 
    # (bottom right and bottom left) or (top right and top left) x-coordinates
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Determine height of new image which is the max distance between 
    # (top right and bottom right) or (top left and bottom left) y-coordinates
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Construct new points to obtain top-down view of image in 
    # top_r, top_l, bottom_l, bottom_r order
    dimensions = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], 
                    [0, height - 1]], dtype = "float32")

    # Convert to Numpy format
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Find perspective transform matrix
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)

    # Return the transformed image
    return cv2.warpPerspective(image, matrix, (width, height))
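
# Note: order_corner_points above assumes approxPolyDP returned the corners in
# top-right, top-left, bottom-left, bottom-right order, which holds for this
# mask; for arbitrary quadrilaterals a coordinate-based ordering (for example
# sorting corners by x+y and x-y) would be more robust.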

#minimum ORB matches required to make a match
MIN_MATCH_COUNT = 10

img1 = cv2.imread("reference.jpg")
img2 = cv2.imread("1.jpg")

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2, None)

# sorts matches
good = []
for i, m in enumerate(matches):
    if i < len(matches) - 1 and m.distance < 0.7 * matches[i+1].distance:
        good.append(m)

if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()
    h,w,d = img1.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)
    mask = np.ones(img2.shape[:2], dtype="uint8") * 255
    rect = cv2.minAreaRect(dst)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    new = cv2.drawContours(mask, [box], -1, 0, -1)
    # remove the contours from the image and show the resulting images
    img = cv2.bitwise_and(img2, img2, mask=mask)
    mask = 255 - mask
    cv2.imshow("After", img)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None

#This is for drawing the match lines inbetween the ref and 1.jpg images
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
               singlePointColor = None,
               matchesMask = matchesMask, # draw only inliers
               flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
cv2.imshow("Matches", img3)

cv2.imshow("Mask", mask)

# Find contour on mask and perform perspective transform
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)
    if len(approx) == 4:
        transformed = perspective_transform(img2, approx)
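# Note: if approxPolyDP never yields exactly 4 points (for example if the
# epsilon factor 0.015 is too tight for a noisy mask), `transformed` is never
# assigned and the imshow call below raises a NameError; a defensive version
# would initialize transformed = None first and check before displaying.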

cv2.imshow("transformed", transformed)

print('template shape:', img1.shape)
print('transformed shape:',transformed.shape)

resized = cv2.resize(transformed, (img1.shape[1], img1.shape[0]))
cv2.imshow("resized", resized)
print('template shape:', img1.shape)
print('resized shape:',resized.shape)
cv2.waitKey()
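
As a side note, the printed shapes show that the transformed crop (288 x 279) and the template (210 x 236) have different aspect ratios, so the final cv2.resize stretches the result slightly. Since the homography M from cv2.findHomography already maps the reference image into the scene, one could also skip the contour and corner detection entirely and warp the matched region straight back into the reference frame at the reference's exact size. A minimal sketch, assuming M, img1 and img2 from the code above:

h, w = img1.shape[:2]
# M maps reference -> scene; WARP_INVERSE_MAP applies its inverse, pulling the
# matched region back to the reference's size and orientation in one step
warped = cv2.warpPerspective(img2, M, (w, h),
                             flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
cv2.imshow("warped", warped)
cv2.waitKey()

This yields the crop directly at the template's dimensions, so no separate resize is needed; the contour-based approach above is still the way to go when only a mask, rather than the homography, is available.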