RGB in image processing in an iPhone app

I am doing image processing in my app. I pick up a pixel colour from the image with a touch and then apply that colour back to the image. My code does get the pixel's colour, but the processing turns the whole image blue. I'm stuck and can't see what is wrong with my code. Could you please help me?

My code is:

-(void) touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event 
{
UITouch *touch = [touches anyObject];
CGPoint coordinateTouch = [touch locationInView:[self view]];//where image was tapped

if (value == YES) {
    self.lastColor = [self getPixelColorAtLocation:coordinateTouch]; 
    value =NO;
}

NSLog(@"color %@",lastColor);
//[pickedColorDelegate pickedColor:(UIColor*)self.lastColor];



ListPoint point;
point.x = coordinateTouch.x;
point.y = coordinateTouch.y;

button = [UIButton buttonWithType:UIButtonTypeCustom];
button.backgroundColor = [UIColor whiteColor];
button.frame = CGRectMake(coordinateTouch.x-5, coordinateTouch.y-5, 2, 2);
//[descImageView addSubview:button];

[bgImage addSubview:button];

// Make image blurred on ImageView
if(bgImage.image)
{

    CGImageRef imgRef = [[bgImage image] CGImage];
    CFDataRef dataRef = CGDataProviderCopyData(CGImageGetDataProvider(imgRef)); 
    const unsigned char *sourceBytesPtr = CFDataGetBytePtr(dataRef);
    int len = CFDataGetLength(dataRef);
    NSLog(@"length = %d, width = %d, height = %d, bytes per row = %d, bit per pixels = %d", 
          len, CGImageGetWidth(imgRef), CGImageGetHeight(imgRef), CGImageGetBytesPerRow(imgRef), CGImageGetBitsPerPixel(imgRef));

    int width = CGImageGetWidth(imgRef);
    int height = CGImageGetHeight(imgRef);
    int widthstep = CGImageGetBytesPerRow(imgRef);
    unsigned char *pixelData = (unsigned char *)malloc(len);
    double wFrame = bgImage.frame.size.width;
    double hFrame = bgImage.frame.size.height;

    Image_Correction(sourceBytesPtr, pixelData, widthstep, width, height, wFrame, hFrame, point);

    NSLog(@"finish");


    NSData *data = [NSData dataWithBytes:pixelData length:len];

    NSLog(@"1");
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);

    NSLog(@"2");
    CGColorSpaceRef colorSpace2 = CGColorSpaceCreateDeviceRGB();

    NSLog(@"3");
    CGImageRef imageRef = CGImageCreate(width, height, 8, CGImageGetBitsPerPixel(imgRef), CGImageGetBytesPerRow(imgRef),
                                        colorSpace2,kCGImageAlphaNoneSkipFirst|kCGBitmapByteOrder32Host,
                                        provider, NULL, false, kCGRenderingIntentDefault);

    NSLog(@"Start processing image");
    UIImage *ret = [UIImage imageWithCGImage:imageRef scale:1.0 orientation:UIImageOrientationUp];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace2);
    CFRelease(dataRef);
    free(pixelData);
    NSLog(@"4");
    bgImage.image = ret;
    [button removeFromSuperview];
}   
}
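
One detail worth noting about the block above: CGImageCreate is given a hard-coded kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host layout, while the bytes copied from the source image keep whatever channel order the source already had. If those two layouts disagree, every pixel's channels get reinterpreted and the whole image takes on a colour cast. A minimal check (just a sketch, reusing the bgImage image view from the code above) is to log the source image's own bitmap info and compare it with those constants:

CGImageRef srcRef = [[bgImage image] CGImage];
CGBitmapInfo srcInfo = CGImageGetBitmapInfo(srcRef);
// Compare these masks against the constants passed to CGImageCreate above.
NSLog(@"source alpha info = %u, byte order = %u",
      (unsigned)(srcInfo & kCGBitmapAlphaInfoMask),
      (unsigned)(srcInfo & kCGBitmapByteOrderMask));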




- (UIColor*) getPixelColorAtLocation:(CGPoint)point {


UIColor* color = nil;
CGImageRef inImage = self.image.CGImage;
// Create an off-screen bitmap context to draw the image into. Format ARGB is 4 bytes per pixel: Alpha, Red, Green, Blue
CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
if (cgctx == NULL) { return nil; /* error */ }

size_t w = CGImageGetWidth(inImage);
size_t h = CGImageGetHeight(inImage);
CGRect rect = {{0,0},{w,h}}; 

// Draw the image to the bitmap context. Once we draw, the memory 
// allocated for the context for rendering will then contain the 
// raw image data in the specified color space.
CGContextDrawImage(cgctx, rect, inImage); 

// Now we can get a pointer to the image data associated with the bitmap
// context.
unsigned char* data = CGBitmapContextGetData (cgctx);
if (data != NULL) {
    //offset locates the pixel in the data from x,y. 
    //4 for 4 bytes of data per pixel, w is width of one row of data.
    int offset = 4*((w*round(point.y))+round(point.x));
     alpha =  data[offset]; 
     red = data[offset+1]; 
     green = data[offset+2]; 
     blue = data[offset+3]; 
    NSLog(@"offset: %i colors: RGB A %i %i %i  %i",offset,red,green,blue,alpha);
    color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
}

// When finished, release the context
CGContextRelease(cgctx); 
// Free image data memory for the context
if (data) { free(data); }

return color;
}
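
Note that the offset calculation above assumes the context's bytes-per-row is exactly 4 * width, which holds here because createARGBBitmapContextFromImage allocates the buffer that way. A more defensive variant (just a sketch; cgctx and point are the locals of the method above, and this would replace the existing offset line) reads the stride from the context itself:

size_t bytesPerRow = CGBitmapContextGetBytesPerRow(cgctx);
int offset = (int)(bytesPerRow * round(point.y) + 4 * round(point.x));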





- (CGContextRef) createARGBBitmapContextFromImage:(CGImageRef) inImage {

CGContextRef    context = NULL;
CGColorSpaceRef colorSpace;
void *          bitmapData;
int             bitmapByteCount;
int             bitmapBytesPerRow;

// Get image width, height. We'll use the entire image.
size_t pixelsWide = CGImageGetWidth(inImage);
size_t pixelsHigh = CGImageGetHeight(inImage);

// Declare the number of bytes per row. Each pixel in the bitmap in this
// example is represented by 4 bytes; 8 bits each of red, green, blue, and
// alpha.
bitmapBytesPerRow   = (pixelsWide * 4);
bitmapByteCount     = (bitmapBytesPerRow * pixelsHigh);

// Use the generic RGB color space.
colorSpace = CGColorSpaceCreateDeviceRGB();

if (colorSpace == NULL)
{
    fprintf(stderr, "Error allocating color space\n");
    return NULL;
}

// Allocate memory for image data. This is the destination in memory
// where any drawing to the bitmap context will be rendered.
bitmapData = malloc( bitmapByteCount );
if (bitmapData == NULL) 
{
    fprintf (stderr, "Memory not allocated!");
    CGColorSpaceRelease( colorSpace );
    return NULL;
}

// Create the bitmap context. We want pre-multiplied ARGB, 8-bits 
// per component. Regardless of what the source image format is 
// (CMYK, Grayscale, and so on) it will be converted over to the format
// specified here by CGBitmapContextCreate.
context = CGBitmapContextCreate (bitmapData,
                                 pixelsWide,
                                 pixelsHigh,
                                 8,      // bits per component
                                 bitmapBytesPerRow,
                                 colorSpace,
                                 kCGImageAlphaPremultipliedFirst);
if (context == NULL)
{
    free (bitmapData);
    fprintf (stderr, "Context not created!");
}

// Make sure to release the colorspace before returning
CGColorSpaceRelease( colorSpace );

return context;
}



int Image_Correction(const unsigned char *pImage, unsigned char *rImage, int widthstep, int nW, int nH, double wFrame, double hFrame, ListPoint point)              

{
double ratiox = nW/wFrame;
double ratioy = nH/hFrame;
double newW, newH, ratio;
if(ratioy > ratiox)
{
    newH = hFrame;
    newW = nW/ratioy;
    ratio = ratioy;
}
else 
{
    newH = nH/ratiox;
    newW = wFrame;
    ratio = ratiox;
}
NSLog(@"new H, W = %f, %f", newW, newH);
NSLog(@"ratiox = %f; ratioy = %f", ratiox, ratioy);

ListPoint real_point;
real_point.x = (point.x - wFrame/2 + newW/2) *ratio;
real_point.y = (point.y - hFrame/2 + newH/2)*ratio;

for(int h = 0; h < nH; h++)
{
    for(int k = 0; k < nW; k++)
    {
        rImage[h*widthstep + k*4 + 0] = pImage[h*widthstep + k*4 + 0];
        rImage[h*widthstep + k*4 + 1] = pImage[h*widthstep + k*4 + 1];
        rImage[h*widthstep + k*4 + 2] = pImage[h*widthstep + k*4 + 2];
        rImage[h*widthstep + k*4 + 3] = pImage[h*widthstep + k*4 + 3];
    }
}

// Modify this parameter to change Blurred area
int iBlurredArea = 6;
for(int h = -ratio*iBlurredArea; h <= ratio*iBlurredArea; h++)
    for(int k = -ratio*iBlurredArea; k <= ratio*iBlurredArea; k++)
    {
        int tempx = real_point.x + k;
        int tempy = real_point.y + h;
        if (((tempy - 3) > 0)&&((tempy+3) >0)&&((tempx - 3) > 0)&&((tempx + 3) >0)) 
        {
            double sumR = 0;
            double sumG = 0;
            double sumB = 0;
            double sumA = 0; 
            double count = 0;
            for(int m = -3; m < 4; m++)
                for (int n = -3; n < 4; n++) 
                {                       
                    sumR = red;//sumR + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 0];
                    sumG = green;//sumG + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 1];
                    sumB = blue;//sumB + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 2];
                    sumA = alpha;//sumA + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 3];
                    count++;
                }



            rImage[tempy*widthstep + tempx*4 + 0] = red;//sumR/count;
            rImage[tempy*widthstep + tempx*4 + 1] = green;//sumG/count;
            rImage[tempy*widthstep + tempx*4 + 2] = blue;//sumB/count;
            rImage[tempy*widthstep + tempx*4 + 3] = alpha;//sumA/count;
        }
    }
return 1;
}
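
As written, the inner 7x7 loop above ignores the accumulated sums and simply stamps the picked red/green/blue/alpha values, so the touched region becomes a solid square rather than a blur. Judging from the commented-out expressions, the intended box average looks roughly like this (a sketch using the identifiers of Image_Correction; it assumes tempx and tempy lie at least 3 pixels inside the image):

double sumR = 0, sumG = 0, sumB = 0, sumA = 0, count = 0;
for (int m = -3; m < 4; m++)
    for (int n = -3; n < 4; n++)
    {
        // Accumulate each of the four channels over the 7x7 neighbourhood.
        sumR += pImage[(tempy + m)*widthstep + (tempx + n)*4 + 0];
        sumG += pImage[(tempy + m)*widthstep + (tempx + n)*4 + 1];
        sumB += pImage[(tempy + m)*widthstep + (tempx + n)*4 + 2];
        sumA += pImage[(tempy + m)*widthstep + (tempx + n)*4 + 3];
        count++;
    }
// Write the averaged channels back, whatever their actual order is.
rImage[tempy*widthstep + tempx*4 + 0] = (unsigned char)(sumR / count);
rImage[tempy*widthstep + tempx*4 + 1] = (unsigned char)(sumG / count);
rImage[tempy*widthstep + tempx*4 + 2] = (unsigned char)(sumB / count);
rImage[tempy*widthstep + tempx*4 + 3] = (unsigned char)(sumA / count);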
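The code below takes a different route to the same per-pixel work: draw the UIImage into a bitmap context created with UIGraphicsBeginImageContext and modify the bytes in place, keeping in mind (as its comments note) that the data comes back in BGRA order:
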
UIImage* modifyImage(UIImage* image)
{
    size_t w = image.size.width;
    size_t h = image.size.height;
    CGFloat scale = image.scale;

    // Create the bitmap context
    UIGraphicsBeginImageContext(CGSizeMake(w*scale, h*scale));
    CGContextRef context = UIGraphicsGetCurrentContext();

    // NOTE you may have to setup a rotation here based on image.imageOrientation
    // but I didn't need to consider that for my images.
    CGContextScaleCTM(context, scale, scale);
    [image drawInRect:CGRectMake(0, 0, w, h)];

    unsigned char* data = CGBitmapContextGetData (context);
    if (data != NULL) {
        size_t height = CGBitmapContextGetHeight(context);
        size_t width = CGBitmapContextGetWidth(context);
        size_t bytesPerRow = CGBitmapContextGetBytesPerRow(context);

        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                // Not sure why the color info is in BGRA format
                // Look at CGBitmapContextGetBitmapInfo(context) if this format isn't working for you
                int offset = y * bytesPerRow + x * 4;
                unsigned char* blue =  &data[offset];
                unsigned char* green = &data[offset+1];
                unsigned char* red = &data[offset+2];
                unsigned char* alpha = &data[offset+3];

                int newRed = ...; // color calculation code here
                int newGreen = ...;
                int newBlue = ...;

                // Assuming you don't want to change the original alpha value.
                *red = (newRed * *alpha)/255;
                *green = (newGreen * *alpha)/255;
                *blue = (newBlue * *alpha)/255; 
            }               
        }
    }

    CGImageRef newImage = CGBitmapContextCreateImage(context);

    UIImage *done = [UIImage imageWithCGImage:newImage scale:image.scale orientation: image.imageOrientation];
    CGImageRelease(newImage);
    UIGraphicsEndImageContext();

    return done;
}
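
A usage sketch (assuming the same bgImage image view as in the question):

bgImage.image = modifyImage(bgImage.image);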