How do I capture a screenshot asynchronously in iOS (cocos2d)?

Tags: objective-c, ios, cocos2d-iphone

I have working screenshot code that I got from here. The problem is that I can't run it asynchronously; my screen comes out completely black.

Is this possible? If so, how would I do it?

Here is the code I'm using:

CGSize winSize = [CCDirector sharedDirector].winSize;
int screenScale = [UIScreen mainScreen].scale;
int width = winSize.width * screenScale;
int height = winSize.height * screenScale;

NSInteger myDataLength = width * height * 4;

// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);

// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < height; y++)
{
    for(int x = 0; x < width * 4; x++)
    {
        buffer2[(height-(1*screenScale) - y) * width * 4 + x] = buffer[y * 4 * width + x];
    }
}

// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);

// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;

// make the cgimage
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);

// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];

This is not possible.

An OpenGL view can only be modified and accessed from the main thread, so any attempt to fetch the OpenGL framebuffer contents from another thread will fail.

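To illustrate the constraint with a sketch (my own addition, not part of this answer): if the capture can be triggered from any thread, hop back to the main queue before touching OpenGL. The method name requestScreenshot is hypothetical, and takeScreenshot stands for the main-thread capture shown in the next answer.

// Minimal sketch (assumption): guard the GL read so it always runs on the
// main thread, where cocos2d's OpenGL context is current.
- (void)requestScreenshot
{
    if ([NSThread isMainThread]) {
        [self takeScreenshot];
    } else {
        dispatch_async(dispatch_get_main_queue(), ^{
            [self takeScreenshot];
        });
    }
}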

Here is a suggestion: you can try to do some of the work on a separate thread, but you still have to grab the OpenGL pixels from the main thread. My suggestion is to save the pixels into a buffer on the main thread, and then schedule a background task that uses that buffer to create the UIImage.

- (void) takeScreenshot 
{
CGSize winSize = [CCDirector sharedDirector].winSize;
int screenScale = [UIScreen mainScreen].scale;
int width = winSize.width * screenScale;
int height = winSize.height * screenScale;

NSInteger myDataLength = width * height * 4;

// allocate array and read pixels into it.
buffer = (GLubyte *) malloc(myDataLength); // NOW PART OF THE CLASS DEFINITION
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
[self performSelectorInBackground:@selector(finishScreenshot) withObject:nil];
}
Instead of keeping the buffer array as a class variable, you could pass the buffer as NSData in the performSelectorInBackground call and add a parameter to finishScreenshot (a sketch of that variant follows the code below).

-(void)finishScreenshot{

CGSize winSize = [CCDirector sharedDirector].winSize;
int screenScale = [UIScreen mainScreen].scale;
int width = winSize.width * screenScale;
int height = winSize.height * screenScale;
NSInteger myDataLength = width * height * 4; // recomputed here, since the local from takeScreenshot is not visible
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < height; y++)
{
    for(int x = 0; x < width * 4; x++)
    {
        buffer2[(height-(1*screenScale) - y) * width * 4 + x] = buffer[y * 4 * width + x];
    }
}

// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);

// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;

// make the cgimage
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);

// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];

}

As I said, I am not sure whether this will work, but it should. Try it and check whether it is really worth the effort in terms of performance.
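For the NSData variant mentioned above, here is a minimal sketch (my own addition, not the answer's code). The selector name finishScreenshotWithData: is hypothetical; the point is that NSData takes ownership of the pixel buffer and frees it when the background method is done with it.

// Sketch of the NSData variant (assumption, not the answer's exact code).
- (void)takeScreenshot
{
    CGSize winSize = [CCDirector sharedDirector].winSize;
    int screenScale = [UIScreen mainScreen].scale;
    int width = winSize.width * screenScale;
    int height = winSize.height * screenScale;
    NSInteger myDataLength = width * height * 4;

    GLubyte *pixels = (GLubyte *) malloc(myDataLength);
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);

    // NSData takes ownership of the malloc'd buffer and will free() it for us.
    NSData *pixelData = [NSData dataWithBytesNoCopy:pixels
                                             length:myDataLength
                                       freeWhenDone:YES];
    [self performSelectorInBackground:@selector(finishScreenshotWithData:)
                           withObject:pixelData];
}

- (void)finishScreenshotWithData:(NSData *)pixelData
{
    const GLubyte *pixels = (const GLubyte *)pixelData.bytes;
    // ... same flip / CGImage / UIImage code as above, reading from pixels ...
}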


Thanks, the code works. The problem now is that the buffer is leaking: I can only take 17 screenshots before it crashes, and I assume I am simply running out of memory. I tried freeing the buffer ivar and the buffer2 local variable, but then it crashes right after taking the screenshot. I don't have a call stack, so I really don't know what is going on.

Glad it worked. Looking at the code again, you do have to free the memory allocated for buffer and buffer2. If I were you, I would add free(buffer2); as well as free(buffer); buffer = NULL;
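A minimal sketch of the cleanup the comments describe (my own addition; the release-callback approach is an assumption, not the commenter's exact code). The glReadPixels buffer can be freed as soon as the flipped copy exists, but buffer2 must stay alive for as long as the CGDataProvider references it, so it is safest to hand ownership of buffer2 to the provider:

// Called by Core Graphics when the provider no longer needs the pixel data.
static void ReleaseScreenshotPixels(void *info, const void *data, size_t size)
{
    free((void *)data);
}

// Inside finishScreenshot, after the flip loop:
free(buffer);   // raw glReadPixels buffer is no longer needed
buffer = NULL;

CGDataProviderRef provider =
    CGDataProviderCreateWithData(NULL, buffer2, myDataLength, ReleaseScreenshotPixels);

// ... CGImageCreate / UIImage as before ...

// Release the Core Graphics objects created here; the UIImage retains what it needs.
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);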