Perspective transform + crop in iOS with OpenCV

Tags: objective-c, ios, image-processing, opencv, perspective

I'm trying to implement a cropping and perspective-correction feature in an upcoming app. While doing my research I found:

So I decided to try implementing this with OpenCV; the framework was already available, so installing it was quick. However, I'm not getting the result I was hoping for (the second image is the result):

I have translated all the code to work with Xcode and triple-checked the coordinates. Can you tell me what is wrong with my code? For completeness, I have also included the UIImage -> cv::Mat conversion and its inverse:

- (void)confirmedImage
{
    if ([_adjustRect frameEdited]) {

    cv::Mat src = [self cvMatFromUIImage:_sourceImage];

    // My original Coordinates
    // 4-------3
    // |       |
    // |       |
    // |       |
    // 1-------2

    CGFloat scaleFactor =  [_sourceImageView contentScale];
    CGPoint p1 = [_adjustRect coordinatesForPoint:4 withScaleFactor:scaleFactor];
    CGPoint p2 = [_adjustRect coordinatesForPoint:3 withScaleFactor:scaleFactor];
    CGPoint p3 = [_adjustRect coordinatesForPoint:1 withScaleFactor:scaleFactor];
    CGPoint p4 = [_adjustRect coordinatesForPoint:2 withScaleFactor:scaleFactor];

    std::vector<cv::Point2f> c1;
    c1.push_back(cv::Point2f(p1.x, p1.y));
    c1.push_back(cv::Point2f(p2.x, p2.y));
    c1.push_back(cv::Point2f(p3.x, p3.y));
    c1.push_back(cv::Point2f(p4.x, p4.y));

    cv::RotatedRect box = minAreaRect(cv::Mat(c1));
    cv::Point2f pts[4];
    box.points(pts);

    cv::Point2f src_vertices[3];
    src_vertices[0] = pts[0];
    src_vertices[1] = pts[1];
    src_vertices[2] = pts[3];

    cv::Point2f dst_vertices[4];
    dst_vertices[0].x = 0;
    dst_vertices[0].y = 0;

    dst_vertices[1].x = box.boundingRect().width-1;
    dst_vertices[1].y = 0;

    dst_vertices[2].x = 0;
    dst_vertices[2].y = box.boundingRect().height-1;

    dst_vertices[3].x = box.boundingRect().width-1;
    dst_vertices[3].y = box.boundingRect().height-1;

    cv::Mat warpAffineMatrix = getAffineTransform(src_vertices, dst_vertices);

    cv::Mat rotated;
    cv::Size size(box.boundingRect().width, box.boundingRect().height);
    warpAffine(src, rotated, warpAffineMatrix, size, cv::INTER_LINEAR, cv::BORDER_CONSTANT);


    [_sourceImageView setNeedsDisplay];
    [_sourceImageView setImage:[self UIImageFromCVMat:rotated]];
    [_sourceImageView setContentMode:UIViewContentModeScaleAspectFit];

    rotated.release();
    src.release();

    }
}

- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
    CGColorSpaceRef colorSpace;
    if ( cvMat.elemSize() == 1 ) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    }
    else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
    CGImageRef imageRef = CGImageCreate( cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(), cvMat.step[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault );
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease( imageRef );
    CGDataProviderRelease( provider );
    CGColorSpaceRelease( colorSpace );
    return finalImage;
}

- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat cvMat( rows, cols, CV_8UC4 );
    CGContextRef contextRef = CGBitmapContextCreate( cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
    CGContextDrawImage( contextRef, CGRectMake(0, 0, rows, cols), image.CGImage );
    CGContextRelease( contextRef );
    CGColorSpaceRelease( colorSpace );
    return cvMat;
}
Is this the right way to solve my problem? Do you have any code that could help me?

Thank you for reading my question!

UPDATE:

I have open-sourced my UIImagePickerController replacement here:
It includes an adjustable crop view, filters, and perspective correction.

I think the point correspondence in getAffineTransform is incorrect. Check the coordinates of the points that box.points(pts) outputs.
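
A quick way to verify this (a minimal debugging sketch, assuming box is the cv::RotatedRect built in the question's confirmedImage) is to print the vertices and compare them against the assumed corner order:

    // minAreaRect's vertex order depends on the rectangle's angle, so it
    // need not match the 4-3-1-2 corner order the question assumes.
    cv::Point2f pts[4];
    box.points(pts);
    for (int i = 0; i < 4; i++) {
        NSLog(@"pts[%d] = (%.1f, %.1f)", i, pts[i].x, pts[i].y);
    }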


Why not just use p1, p2, p3 and p4 directly to compute the transformation?
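
A minimal sketch of that suggestion, assuming p1 through p4 hold the corners in the question's 4-3-1-2 order and src is the input Mat (note it uses a perspective transform rather than an affine one, since all four point pairs are needed):

    // The four user-picked corners, reordered to top-left, top-right,
    // bottom-right, bottom-left (see the corner diagram in the question).
    cv::Point2f srcQuad[4] = {
        cv::Point2f(p1.x, p1.y),   // corner 4: top-left
        cv::Point2f(p2.x, p2.y),   // corner 3: top-right
        cv::Point2f(p4.x, p4.y),   // corner 2: bottom-right
        cv::Point2f(p3.x, p3.y)    // corner 1: bottom-left
    };

    // Output size taken from the longer pair of opposite edges.
    float outW = std::max(std::hypot(srcQuad[1].x - srcQuad[0].x, srcQuad[1].y - srcQuad[0].y),
                          std::hypot(srcQuad[2].x - srcQuad[3].x, srcQuad[2].y - srcQuad[3].y));
    float outH = std::max(std::hypot(srcQuad[3].x - srcQuad[0].x, srcQuad[3].y - srcQuad[0].y),
                          std::hypot(srcQuad[2].x - srcQuad[1].x, srcQuad[2].y - srcQuad[1].y));

    cv::Point2f dstQuad[4] = {
        cv::Point2f(0, 0),
        cv::Point2f(outW - 1, 0),
        cv::Point2f(outW - 1, outH - 1),
        cv::Point2f(0, outH - 1)
    };

    // getAffineTransform only takes three point pairs and cannot model
    // perspective distortion; getPerspectiveTransform takes all four.
    cv::Mat M = cv::getPerspectiveTransform(srcQuad, dstQuad);
    cv::Mat corrected;
    cv::warpPerspective(src, corrected, M, cv::Size((int)outW, (int)outH));

This is essentially the route the accepted solution below ends up taking.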

So, after a few days of trying, I came up with a solution (ignore the blue dots on the second image):

As promised, here is a full copy of the code:

- (void)confirmedImage
{
    cv::Mat originalRot = [self cvMatFromUIImage:_sourceImage];
    cv::Mat original;
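    // The transpose + horizontal flip below amount to a 90° clockwise
    // rotation of the pixel data, intended to undo the width/height swap
    // that cvMatFromUIImage: (further down) performs on the UIImage.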
    cv::transpose(originalRot, original);

    originalRot.release();

    cv::flip(original, original, 1);


    CGFloat scaleFactor =  [_sourceImageView contentScale];

    CGPoint ptBottomLeft = [_adjustRect coordinatesForPoint:1 withScaleFactor:scaleFactor];
    CGPoint ptBottomRight = [_adjustRect coordinatesForPoint:2 withScaleFactor:scaleFactor];
    CGPoint ptTopRight = [_adjustRect coordinatesForPoint:3 withScaleFactor:scaleFactor];
    CGPoint ptTopLeft = [_adjustRect coordinatesForPoint:4 withScaleFactor:scaleFactor];

    // Edge lengths of the selected quad (Euclidean distances)
    CGFloat w1 = sqrt( pow(ptBottomRight.x - ptBottomLeft.x, 2) + pow(ptBottomRight.y - ptBottomLeft.y, 2));
    CGFloat w2 = sqrt( pow(ptTopRight.x - ptTopLeft.x, 2) + pow(ptTopRight.y - ptTopLeft.y, 2));

    CGFloat h1 = sqrt( pow(ptTopRight.x - ptBottomRight.x, 2) + pow(ptTopRight.y - ptBottomRight.y, 2));
    CGFloat h2 = sqrt( pow(ptTopLeft.x - ptBottomLeft.x, 2) + pow(ptTopLeft.y - ptBottomLeft.y, 2));

    CGFloat maxWidth = (w1 > w2) ? w1 : w2;    // longer of the two horizontal edges
    CGFloat maxHeight = (h1 > h2) ? h1 : h2;   // longer of the two vertical edges

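    // Map the four user-adjusted corners (src, ordered top-left, top-right,
    // bottom-right, bottom-left) onto an upright maxWidth x maxHeight
    // rectangle (dst).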
    cv::Point2f src[4], dst[4];
    src[0].x = ptTopLeft.x;
    src[0].y = ptTopLeft.y;
    src[1].x = ptTopRight.x;
    src[1].y = ptTopRight.y;
    src[2].x = ptBottomRight.x;
    src[2].y = ptBottomRight.y;
    src[3].x = ptBottomLeft.x;
    src[3].y = ptBottomLeft.y;

    dst[0].x = 0;
    dst[0].y = 0;
    dst[1].x = maxWidth - 1;
    dst[1].y = 0;
    dst[2].x = maxWidth - 1;
    dst[2].y = maxHeight - 1;
    dst[3].x = 0;
    dst[3].y = maxHeight - 1;

    cv::Mat undistorted = cv::Mat(cv::Size(maxWidth, maxHeight), CV_8UC4); // 4 channels, matching the source
    cv::warpPerspective(original, undistorted, cv::getPerspectiveTransform(src, dst), cv::Size(maxWidth, maxHeight));

    UIImage *newImage = [self UIImageFromCVMat:undistorted];

    undistorted.release();
    original.release();

    [_sourceImageView setNeedsDisplay];
    [_sourceImageView setImage:newImage];
    [_sourceImageView setContentMode:UIViewContentModeScaleAspectFit];

}

- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];

    CGColorSpaceRef colorSpace;

    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }

    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

    CGImageRef imageRef = CGImageCreate(cvMat.cols,                                     // Width
                                        cvMat.rows,                                     // Height
                                        8,                                              // Bits per component
                                        8 * cvMat.elemSize(),                           // Bits per pixel
                                        cvMat.step[0],                                  // Bytes per row
                                        colorSpace,                                     // Colorspace
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,  // Bitmap info flags
                                        provider,                                       // CGDataProviderRef
                                        NULL,                                           // Decode
                                        false,                                          // Should interpolate
                                        kCGRenderingIntentDefault);                     // Intent

    UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);

    return image;
}

- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.height;
    CGFloat rows = image.size.width;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,                 // Pointer to backing data
                                                    cols,                       // Width of bitmap
                                                    rows,                       // Height of bitmap
                                                    8,                          // Bits per component
                                                    cvMat.step[0],              // Bytes per row
                                                    colorSpace,                 // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags

    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    return cvMat;
}