iOS:从背景图像中检索矩形图像
我正在做一个实现：在一张大的背景图像中包含一个矩形图像。我想通过编程从大图中提取出这个矩形图像，并从中识别文字信息。我尝试使用 OpenCV 第三方框架，但无法从大背景图像中提取出矩形图像，请有人指导我如何实现。
更新：我找到了使用 OpenCV 查找正方形的示例代码。我可以修改它来查找矩形形状吗？
最新更新：我终于写出了可用的代码，如下所示：
// Converts a UIImage into a 4-channel cv::Mat (8 bits per component, alpha
// ignored) by rendering the image's CGImage into a bitmap context that is
// backed directly by the Mat's own pixel buffer.
- (cv::Mat)cvMatWithImage:(UIImage *)image
{
CGFloat width = image.size.width;
CGFloat height = image.size.height;
// rows first, then cols; CV_8UC4 = 8 bits/component, 4 channels.
cv::Mat mat(height, width, CV_8UC4);
// Reuse the source image's colour space so drawing forces no conversion.
CGColorSpaceRef space = CGImageGetColorSpace(image.CGImage);
CGContextRef ctx = CGBitmapContextCreate(mat.data,    // backing pixel store
width,                                                // bitmap width
height,                                               // bitmap height
8,                                                    // bits per component
mat.step[0],                                          // bytes per row
space,                                                // colour space
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault);                           // bitmap info flags
CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), image.CGImage);
CGContextRelease(ctx);
return mat;
}
// Converts a cv::Mat back into a UIImage.  Single-byte-per-pixel Mats are
// rendered as device gray; everything else as device RGB.
-(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
// Copy the Mat's pixels into an NSData so the resulting image stays valid
// even after the Mat is destroyed.
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if ( cvMat.elemSize() == 1 ) {
colorSpace = CGColorSpaceCreateDeviceGray();
}
else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
// FIX: under ARC, converting NSData to CFDataRef requires an explicit
// __bridge cast — the plain C cast in the original does not compile
// (the original's own trailing comment flagged exactly this).
CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
CGImageRef imageRef = CGImageCreate( cvMat.cols,          // width
cvMat.rows,                                               // height
8,                                                        // bits per component
8 * cvMat.elemSize(),                                     // bits per pixel
cvMat.step[0],                                            // bytes per row
colorSpace,
kCGImageAlphaNone|kCGBitmapByteOrderDefault,
provider,
NULL,                                                     // no decode array
false,                                                    // no interpolation
kCGRenderingIntentDefault );
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease( imageRef );
CGDataProviderRelease( provider );
CGColorSpaceRelease( colorSpace );
return finalImage;
}
// Loads the demo image, runs the square detector on it and displays the
// annotated result in self.imageView.
// NOTE(review): `imageView` (bare) is a UIImage* ivar that shadows the
// UIImageView property `self.imageView` in name only — confusing, but
// preserved from the original code.
-(void)forOpenCV
{
imageView = [UIImage imageNamed:@"myimage.jpg"];
if( imageView != nil )
{
// UIImage -> cv::Mat via the UIImage+OpenCV category.
// NOTE(review): tempMat is never used afterwards — dead work.
cv::Mat tempMat = [imageView CVMat];
cv::Mat greyMat = [self cvMatWithImage:imageView];
cv::vector<cv::vector<cv::Point> > squares;
// Selector is debugSquares:: — the second argument is unnamed.
cv::Mat img= [self debugSquares: squares: greyMat];
// Convert the annotated Mat back to a UIImage and show it.
imageView = [self UIImageFromCVMat: img];
self.imageView.image = imageView;
}
}
// Returns the cosine of the angle formed at vertex pt0 by the segments
// pt0->pt1 and pt0->pt2.  The 1e-10 term guards against division by zero
// when the points are (nearly) coincident.
double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 ) {
double ux = pt1.x - pt0.x, uy = pt1.y - pt0.y;
double vx = pt2.x - pt0.x, vy = pt2.y - pt0.y;
double dot = ux*vx + uy*vy;
double normProduct = (ux*ux + uy*uy) * (vx*vx + vy*vy);
return dot / sqrt(normProduct + 1e-10);
}
// Finds square-ish contours (4 vertices, convex, corner angles near 90°)
// in `image`, annotates the last one found, and returns the annotated image.
// NOTE(review): `squares` is received BY VALUE, so contours accumulated here
// never reach the caller — they are only used for the drawing/logging below.
- (cv::Mat) debugSquares: (std::vector<std::vector<cv::Point> >) squares : (cv::Mat &)image
{
NSLog(@"%lu",squares.size());
// Median blur first: smoothing suppresses noise and enhances edge detection.
cv::Mat blurred = image.clone();
medianBlur(image, blurred, 9);
cv::Mat gray0(image.size(), CV_8U), gray;
cv::vector<cv::vector<cv::Point> > contours;
// Search every colour plane of the image for squares.
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
// FIX: extract the channel from the *blurred* image.  The original read
// from `image`, so the median blur above was computed but never used,
// defeating the stated purpose (the OpenCV squares.cpp sample also
// detects on the smoothed image).
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// Try several threshold levels.
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Level 0 uses Canny instead of a zero threshold: Canny helps to
// catch squares with gradient shading.
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3);
// Dilation closes small gaps between edge segments.
dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a flat list.
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test every contour for "square-ness".
cv::vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// Approximate the contour with accuracy proportional to its perimeter.
approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);
// Absolute area: contourArea is signed by contour orientation.
if (approx.size() == 4 &&
fabs(contourArea(cv::Mat(approx))) > 1000 &&
isContourConvex(cv::Mat(approx)))
{
// Keep only quadrilaterals whose every corner is close to 90°
// (cosine of each corner angle below 0.3).
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
NSLog(@"squares.size(): %lu",squares.size());
for( size_t i = 0; i < squares.size(); i++ )
{
cv::Rect rectangle = boundingRect(cv::Mat(squares[i]));
NSLog(@"rectangle.x: %d", rectangle.x);
NSLog(@"rectangle.y: %d", rectangle.y);
if(i==squares.size()-1) // annotate only the last detected rectangle
{
const cv::Point* p = &squares[i][0];
int n = (int)squares[i].size();
NSLog(@"%d",n);
// NOTE(review): hard-coded debug diagonal from the author's test image —
// these magic coordinates only make sense for that specific image.
line(image, cv::Point(507,418), cv::Point(507+1776,418+1372), cv::Scalar(255,0,0),2,8);
polylines(image, &p, &n, 1, true, cv::Scalar(255,255,0), 5, CV_AA);
int fx1=rectangle.x;
NSLog(@"X: %d", fx1);
int fy1=rectangle.y;
NSLog(@"Y: %d", fy1);
int fx2=rectangle.x+rectangle.width;
NSLog(@"Width: %d", fx2);
int fy2=rectangle.y+rectangle.height;
NSLog(@"Height: %d", fy2);
// Diagonal of the detected rectangle's bounding box.
line(image, cv::Point(fx1,fy1), cv::Point(fx2,fy2), cv::Scalar(0,0,255),2,8);
}
}
return image;
}
// ---------------------------------------------------------------------------
// NOTE: The scraped page repeated the entire answer a second time here in a
// machine-translated form (keywords and identifiers mangled into Chinese,
// many code lines fused together, e.g. "返回cvMat;" for "return cvMat;").
// That duplicate was unreadable, uncompilable and contained no information
// beyond the canonical listing above of cvMatWithImage:, UIImageFromCVMat:,
// forOpenCV, angle() and debugSquares::, so it has been removed.
// ---------------------------------------------------------------------------
对于(size_t i=0;iclass squares
{
public:
static cv::Mat& findSquares( const cv::Mat& image, cv::vector<cv::vector<cv::Point> >& squares );
static cv::Mat& drawSquares( cv::Mat& image, const cv::vector<cv::vector<cv::Point> >& squares );
};
// Tunable detection parameters, lifted out of the original C++ source so we
// can manipulate them from Obj-C.  Typed constants instead of #define macros
// (type-checked, scoped, visible to the debugger).
static const CGFloat   kSquareTolerance = 0.01; // approxPolyDP accuracy factor
static const NSInteger kSquareThreshold = 50;   // Canny low threshold
static const NSInteger kSquareLevels    = 9;    // threshold levels per plane
UIImage* image =
    [CVSquaresWrapper detectedSquaresInImage:self.image
                                   tolerance:kSquareTolerance
                                   threshold:kSquareThreshold
                                      levels:kSquareLevels];
// CVSquaresWrapper.h
// Objective-C facade over the C++ CVSquares detector, so that pure
// Objective-C call sites never need to see any C++ types.
// FIX: the header mentions UIImage/CGFloat, so it must import UIKit —
// Foundation alone does not declare them.
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface CVSquaresWrapper : NSObject

/// Runs square detection on `image` and returns the image annotated with the
/// detected squares (nil if detection/conversion could not run).
/// @param tolerance approxPolyDP accuracy as a fraction of the perimeter.
/// @param threshold Detector threshold passed through to the C++ code.
/// @param levels    Number of threshold levels to try per colour plane.
+ (nullable UIImage*) detectedSquaresInImage:(UIImage*)image
                                   tolerance:(CGFloat)tolerance
                                   threshold:(NSInteger)threshold
                                      levels:(NSInteger)levels;
@end

NS_ASSUME_NONNULL_END
// CVSquaresWrapper.mm
// Bridges between the Objective-C world (UIImage) and the C++ detector
// (cv::Mat).  Must be compiled as Objective-C++ (.mm extension).
#import "CVSquaresWrapper.h"
#import "CVSquares.h"
#import "UIImage+OpenCV.h"

@implementation CVSquaresWrapper

+ (UIImage*) detectedSquaresInImage:(UIImage*) image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels
{
    // UIImage -> cv::Mat via the UIImage+OpenCV category.
    cv::Mat matImage = [image CVMat];
    // Delegate the actual detection to the C++ class; its signature
    // deliberately mirrors this method's, keyword for keyword.
    matImage = CVSquares::detectedSquaresInImage(matImage, tolerance, threshold, levels);
    // cv::Mat -> UIImage (category again) and hand the result back.
    UIImage* result = [UIImage imageFromCVMat:matImage];
    return result;
}

@end
// CVSquares.h
#ifndef __OpenCVClient__CVSquares__
#define __OpenCVClient__CVSquares__
//class definition
//in this example we do not need a class
//as we have no instance variables and just one static function.
//We could instead just declare the function but this form seems clearer
class CVSquares
{
public:
static cv::Mat detectedSquaresInImage (cv::Mat image, float tol, int threshold, int levels);
};
#endif /* defined(__OpenCVClient__CVSquares__) */
// CVSquares.cpp
#include "CVSquares.h"
#include <iostream>   // FIX: cout/endl were used without this include

using namespace std;
using namespace cv;

// Detector tuning knobs; overwritten on every call from the parameters below.
static int thresh = 50, N = 11;
static float tolerance = 0.01;

// Forward declarations so the public entry point can sit at the top of the
// file (the definitions are the original squares.cpp routines).
static void findSquares( const Mat& image, vector<vector<Point> >& squares );
static void drawSquares( Mat& image, vector<vector<Point> >& squares );

// Public entry point; performs the role of main() in the original sample
// (main() itself is deleted).
cv::Mat CVSquares::detectedSquaresInImage (cv::Mat image, float tol, int threshold, int levels)
{
    vector<vector<Point> > squares;
    // FIX: bail out early on an empty image — the original only logged the
    // problem and then ran the detector on the empty Mat anyway.
    if( image.empty() )
    {
        cout << "Couldn't load " << endl;
        return image;
    }
    tolerance = tol;
    thresh = threshold;
    N = levels;
    findSquares(image, squares);
    drawSquares(image, squares);
    return image;
}
// the rest of this file is identical to the original squares.cpp except:
// main{} is removed
// this line is removed from drawSquares:
// imshow(wndname, image);
// (obj-c will do the drawing)
//UIImage+OpenCV.h
// UIImage <-> cv::Mat conversion category.  Because cv::Mat appears in the
// method signatures, every file importing this header must be compiled as
// Objective-C++ (.mm).
// FIX: include OpenCV so the header is self-contained (cv::Mat was used
// without any declaration in scope).
#import <UIKit/UIKit.h>
#import <opencv2/opencv.hpp>

@interface UIImage (UIImage_OpenCV)

/// Builds a UIImage from an OpenCV matrix (cv::Mat to UIImage).
+ (UIImage *)imageFromCVMat:(cv::Mat&)cvMat;

/// Converts the receiver into an OpenCV matrix (UIImage to cv::Mat).
- (cv::Mat)CVMat;

@end