OpenCV: transforming an image with an affine transform

I want to transform the first image into the second one, and I think the mapping is probably an affine transformation. My questions are as follows:

(1) As stated above, I think the transformation is affine. So the first step is to click three corner points in the first image in clockwise order (the mouse callback returns their coordinates), which gives three pairs of corresponding points, and to set their counterparts to specific coordinates (the distance between each pair of corner points is known). The second step is to perform the affine transform with getAffineTransform() and warpAffine(). But it turns out that this is not good enough (see the third image), so are there any ideas on how to improve the result?

My code is shown below:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/features2d/features2d.hpp"

#include <stdio.h>
#include <vector>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;


Mat org;
int n=0;
vector<Point> capturePoint;

void on_mouse(int event,int x,int y,int flags,void *ustc)
{
    Point pt;
    char coordinateName[16];

    if (event == CV_EVENT_LBUTTONDOWN)
    {
        pt = Point(x,y);
        cout<<x<<" "<<y<<endl;
        capturePoint.push_back(pt);
        n++;

        circle(org,pt,2,Scalar(255,0,0,0),CV_FILLED,CV_AA,0);
        sprintf(coordinateName,"(%d,%d)",x,y);
        putText(org,coordinateName,pt,FONT_HERSHEY_SIMPLEX,0.5,Scalar(0,0,0,255),1,8);

        //imshow("org",org);

        if(n>=4)
        {
            imshow("org",org);
            cvDestroyAllWindows();
        }
    }
}

int main()
{
    org = imread("1-3.jpg",1);

    namedWindow("org",1);
    setMouseCallback("org",on_mouse,0);// mouse callback;

    imshow("org",org);
    waitKey(0);
    //cout<<capturePoint.size()<<endl;

    //three pairs of corresponding points;

    Point2f srcPoint[3];
    Point2f dstPoint[3];
    srcPoint[0]=capturePoint[0];//mouse click along clockwise direction;
    srcPoint[1]=capturePoint[1];
    srcPoint[2]=capturePoint[2];
    //srcPoint[3]=capturePoint[3];

    dstPoint[0]=Point(0,0);//distances between each corner point are known;
    dstPoint[1]=Point(640,0);//width=320,height=220;
    dstPoint[2]=Point(640,440);
    //dstPoint[3]=Point(0,220);

    Mat warpDst=Mat(org.rows, org.cols, org.type());
    //Mat warpMat = findHomography( srcPoint, dstPoint, 0 );
    Mat warpMat = getAffineTransform(srcPoint,dstPoint);
    warpAffine(org,warpDst,warpMat,org.size());//affine transformation;
    imshow("Warp",warpDst);

    waitKey(0);

    return 0;
}
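
One possible refinement, hinted at by the commented-out findHomography line and fourth point in the code above: keep all four clicked corners and compute a perspective transform instead of an affine one. The sheet lies in a plane that is tilted with respect to the camera, so a full homography generally models the mapping better than an affine transform, which cannot remove perspective foreshortening. Below is a minimal sketch in Python (the same getPerspectiveTransform/warpPerspective functions exist in the C++ API); the corner coordinates are placeholders for the actual mouse clicks, and the 640 x 440 target size is taken from the code above.

import cv2
import numpy as np

# Four clicked corners of the sheet, in clockwise order starting at the
# top-left corner (placeholder values; replace with the real clicks).
src = np.float32([[114, 92], [538, 110], [550, 420], [98, 400]])

# Where those corners should land: the 640 x 440 rectangle with the
# known distances between the corner points.
dst = np.float32([[0, 0], [640, 0], [640, 440], [0, 440]])

img = cv2.imread("1-3.jpg")
H = cv2.getPerspectiveTransform(src, dst)          # 3x3 homography from 4 point pairs
warped = cv2.warpPerspective(img, H, (640, 440))   # output size = target rectangle

cv2.imshow("warped", warped)
cv2.waitKey(0)
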
I played with your image using cv2 (the Python bindings of OpenCV). The result is still not perfect, but you can improve it further by tuning the parameters: a smaller k gives better corner detection, and the threshold also matters.

import cv2
import numpy as np
from pylab import *

fp = "qkwWV.jpg"

img = cv2.imread(fp)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# edge and corner detection using cornerHarris
blockSize = 16
ksize  = 7
k = 0.005
corners = cv2.cornerHarris(gray, blockSize, ksize, k)
cv2.normalize( corners, corners, 0, 255, cv2.NORM_MINMAX, cv2.CV_32F, None)

# play with threshold until only 4 corners are left.
figure()
thrs = 173
imshow(corners>thrs, cmap=cm.gray)

# get the center coordinates of the 4 groups using k-means
y,x = np.where(corners>thrs)
candidates = np.array(list(zip(x,y)), dtype=np.float32)

term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
flags = cv2.KMEANS_RANDOM_CENTERS
# (on OpenCV >= 3 this call needs an explicit bestLabels argument:
#  cv2.kmeans(candidates, 4, None, term_crit, 10, flags))
ret, labels, centers = cv2.kmeans(candidates, 4, term_crit, 10, flags)

# draw cross hairs at the detected corners on the original image
centers = centers.astype(int)
L = 40
for _x, _y in centers:
    cv2.line(img, (_x-L, _y), (_x+L, _y), (255,0,0), 3)
    cv2.line(img, (_x, _y-L), (_x, _y+L), (255,0,0), 3)

figure()
imshow(img)
show()
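
The four centers returned by k-means come back in an arbitrary order, so before feeding them into getAffineTransform or getPerspectiveTransform they have to be matched to the right target corners. A common heuristic (an assumption here, not part of the answer above) uses coordinate sums and differences: the top-left corner has the smallest x+y, the bottom-right the largest, the top-right the smallest y-x and the bottom-left the largest. A small sketch with a hypothetical order_corners() helper:

import numpy as np

def order_corners(pts):
    # Order four (x, y) points as top-left, top-right, bottom-right, bottom-left.
    pts = np.asarray(pts, dtype=np.float32)
    s = pts.sum(axis=1)             # x + y per point
    d = np.diff(pts, axis=1)[:, 0]  # y - x per point
    ordered = np.zeros((4, 2), dtype=np.float32)
    ordered[0] = pts[np.argmin(s)]  # top-left: smallest x + y
    ordered[2] = pts[np.argmax(s)]  # bottom-right: largest x + y
    ordered[1] = pts[np.argmin(d)]  # top-right: smallest y - x
    ordered[3] = pts[np.argmax(d)]  # bottom-left: largest y - x
    return ordered

# e.g. warp with the detected corners instead of manual clicks:
# src = order_corners(centers)
# dst = np.float32([[0, 0], [640, 0], [640, 440], [0, 440]])
# warped = cv2.warpPerspective(img, cv2.getPerspectiveTransform(src, dst), (640, 440))
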


I think you are right, an affine transformation should be enough. What about detecting the corner points of the paper with cornerHarris? That would automate the process and give more accurate coordinates for the affine transform.

@otterb Sorry for replying so late. I have tried the cornerHarris() method; as you know, it also detects other, unwanted corner points. What I want are the four "real" corner points.

Have you tried circle/ellipse detection methods? Otherwise, if Harris always detects all the real corner points (along with some others), you might be able to pick out the real ones with some heuristics.

What exactly is wrong with the third image? It looks quite accurate.

@Dennis Hi, look at the ellipse in the upper-left corner; it should be a perfect circle. Also, the lower-left corner is missing.
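
The circle/ellipse detection route mentioned in the comments could look roughly like the sketch below. It is only an illustration under several assumptions: it assumes a recent OpenCV (cv2.HOUGH_GRADIENT), it reuses the file name from the answer above, and every Hough parameter (dp, minDist, param1, param2 and the radius bounds) is a guess that would need tuning for this particular image.

import cv2
import numpy as np

img = cv2.imread("qkwWV.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)  # a little smoothing usually helps the Hough transform

# All parameters are guesses; minRadius/maxRadius should bracket the printed circles.
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=50,
                           param1=100, param2=30, minRadius=5, maxRadius=60)

if circles is not None:
    for cx, cy, r in np.round(circles[0]).astype(int):
        cv2.circle(img, (int(cx), int(cy)), int(r), (0, 255, 0), 2)  # detected circle
        cv2.circle(img, (int(cx), int(cy)), 2, (0, 0, 255), 3)       # its center

cv2.imshow("circles", img)
cv2.waitKey(0)
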