Iphone 如何从EagleView的纹理中获取UIImage?
我正在尝试使用以下方法，从加载到 EAGLView 的纹理中获取 UIImage：
//Try to get UIImage from EAGLView and assign to imageDataPhoto1
UIGraphicsBeginImageContext(myEAGLView.bounds.size);
[myEAGLView.layer renderInContext:UIGraphicsGetCurrentContext()];
imageDataPhoto1 = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
[imageDataPhoto1 retain];
不幸的是,当我将生成的UIImage(imageDataPhoto1)分配给UIImageView时,它只是一个空白黑色。我应该做点别的吗?顺便说一下,我用以下方法从相机加载一幅图像作为EagleView的纹理(代码属于Apple):
-------来自纹理------
#导入
#导入“Texture.h”
静态无符号整数nextPOT(无符号整数x)
{
x=x-1;
x=x |(x>>1);
x=x |(x>>2);
x=x |(x>>4);
x=x |(x>>8);
x=x |(x>>16);
返回x+1;
}
//这不是一个完全通用的图像加载程序。这是一个如何使用的例子
//CGImage可直接访问解压缩的图像数据。只有最常见的
//支持使用的图像格式。有必要扩展此代码
//说明其他用途,例如立方体贴图或压缩纹理。
//
//如果支持图像格式,此加载程序将生成OpenGL 2D纹理对象
//并从中上传texel,如果需要,填充到POT中。为了图像处理的目的,
//边界像素也在此处复制,以确保在模糊等过程中进行适当过滤。
//
//此函数的调用方负责删除GL纹理对象。
void loadTexture(常量字符*名称、图像*img、渲染器信息*渲染器、CGImageRef newImage)
{
GLuint texID=0,分量,x,y;
GLuint imgWide,imgHigh;//实际图像大小
GLuint rowBytes,rowPixels;//由CGImage填充的图像大小
GLuint POTWide,POTHigh;//图像大小填充为下一个二次方
CGBitmapInfo info;//CGImage组件布局信息
CGColorSpaceModel colormodel;//CGImage colormodel(RGB、CMYK、调色板等)
内盂,形态;
GLubyte*像素,*temp=NULL;
CGImageRef CGImage=新图像;
rt_断言(CGImage);
如果(!CGImage){
NSLog(@“无CGImage!”);
回来
}
//解析CGImage信息
info=CGImageGetBitmapInfo(CGImage);//CGImage可以按RGBA、BGRA或ARGB顺序返回像素
colormodel=CGColorSpaceGetModel(CGImageGetColorSpace(CGImage));
大小\u t bpp=CGImageGetBitsPerPixel(CGImage);
如果(bpp<8 | | bpp>32 | | |(colormodel!=KCGColorSpaceModel单色&colormodel!=kCGColorSpaceModelRGB))
{
//此加载程序不支持所有可能的CGImage类型,例如调色板图像
CGImageRelease(CGImage);
回来
}
组件=bpp>>3;
rowBytes=CGImageGetBytesPerRow(CGImage);//CGImage可以填充行
行像素=行字节/组件;
imgWide=CGImageGetWidth(CGImage);
imgHigh=CGImageGetHeight(CGImage);
img->wide=行像素;
img->high=imgHigh;
img->s=(浮点)imgWide/rowPixels;
img->t=1.0;
//选择OpenGL格式
交换机(bpp)
{
违约:
rt_断言(0&“未知CGImage bpp”);
案例32:
{
内部=GL_RGBA;
开关(信息和kCGBitmapAlphaInfoMask)
{
案例KCGIMAGEALPHA预乘优先:
案例kCGImageAlphaFirst:
案例KCGIMAGEALPHANEONSKIPFIRST:
格式=GL_BGRA;
打破
违约:
格式=GL_RGBA;
}
打破
}
案例24:
内部=格式=总账_RGB;
打破
案例16:
内部=格式=GL_亮度_α;
打破
案例8:
内部=格式=GL\U亮度;
打破
}
//获取指向未压缩图像数据的指针。
//
//这允许访问原始(可能未经复制)数据,但不允许进行任何操作
//(如缩放)必须手动完成。将此与绘制图像进行对比
//进入CGBitmapContext,它允许缩放,但总是强制预乘。
CFDataRef data=CGDataProviderCopyData(CGImageGetDataProvider(CGImage));
rt_断言(数据);
像素=(GLubyte*)CFDataGetBytePtr(数据);
rt_断言(像素);
//如果CGImage组件布局与OpenGL不兼容,请修复它。
//在设备上,CGImage通常会返回BGRA或RGBA。
//在模拟器上,CGImage可能返回ARGB,具体取决于文件格式。
如果(格式==GL_BGRA)
{
uint32_t*p=(uint32_t*)像素;
int i,num=img->wide*img->high;
if((信息和kCGBitmapByteOrderMask)!=kCGBitmapByteOrder32Host)
{
//从ARGB转换为BGRA
对于(i=0;i>24);
}
//所有当前的iPhoneOS设备都通过扩展支持BGRA。
如果(!renderer->extension[IMG\u texture\u format\u bgra888])
{
格式=GL_RGBA;
//从BGRA转换为RGBA
对于(i=0;i>16)和0xFF)|(p[i]和0xFF00FF00)|((p[i]&0xFF)16)和0xFF00);
#恩迪夫
}
}
//确定是否需要将此图像填充为2的幂。
//有多种方法可以在仅支持POT的渲染器上处理NPOT图像:
//1)将图像缩小到罐大小。质量下降。
//2)将图像填充到容器大小。浪费内存。
//3)将图像分割为多个POT纹理。需要更多的渲染逻辑。
//
//我们这里只处理一幅图像,为了简单起见,选择2)。
//
//如果喜欢1),可以使用CoreGraphics将图像缩放到CGBitmapContext中。
POTWide=nextPOT(img->wide);
POTHigh=下一个点(img->high);
如果(!renderer->extension[APPLE_texture_2D_limited_npot]&&&(img->wide!=POTWide|img->high!=pothhigh))
{
GLuint dstBytes=POTWide*组件;
GLubyte*temp=(GLubyte*)malloc(dstBytes*
#import <UIKit/UIKit.h>
#import "Texture.h"
// Rounds v up to the next power of two (returns v unchanged when it is
// already a power of two). Uses the classic bit-smearing trick: after the
// shifts, every bit below the highest set bit of (v - 1) is set, so adding
// one yields the next power of two. Note nextPOT(0) == 0 by this arithmetic.
static unsigned int nextPOT(unsigned int v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}
// This is not a fully generalized image loader. It is an example of how to use
// CGImage to directly access decompressed image data. Only the most commonly
// used image formats are supported. It will be necessary to expand this code
// to account for other uses, for example cubemaps or compressed textures.
//
// If the image format is supported, this loader will generate an OpenGL 2D
// texture object and upload texels from it, padding to POT if needed. For image
// processing purposes, border pixels are also replicated here to ensure proper
// filtering during e.g. blur.
//
// The caller of this function is responsible for deleting the GL texture object.
//
// Parameters:
//   name     - unused here; kept for interface compatibility with callers.
//   img      - out: receives the GL texture ID, the (possibly padded) texture
//              dimensions, and the s/t extents of the real payload within it.
//   renderer - queried for extension support and max texture size.
//   newImage - source image; this function releases it (CGImageRelease) on
//              every path, so the caller transfers ownership.
void loadTexture(const char *name, Image *img, RendererInfo *renderer, CGImageRef newImage)
{
    GLuint texID = 0, components, x, y;
    GLuint imgWide, imgHigh;      // Real image size
    GLuint rowBytes, rowPixels;   // Image size padded by CGImage
    GLuint POTWide, POTHigh;      // Image size padded to next power of two
    CGBitmapInfo info;            // CGImage component layout info
    CGColorSpaceModel colormodel; // CGImage colormodel (RGB, CMYK, paletted, etc)
    GLenum internal, format;
    GLubyte *pixels, *temp = NULL;
    CGImageRef CGImage = newImage;

    rt_assert(CGImage);
    if (!CGImage) {
        NSLog(@"No CGImage!");
        return;
    }

    // Parse CGImage info
    info = CGImageGetBitmapInfo(CGImage); // CGImage may return pixels in RGBA, BGRA, or ARGB order
    colormodel = CGColorSpaceGetModel(CGImageGetColorSpace(CGImage));
    size_t bpp = CGImageGetBitsPerPixel(CGImage);
    if (bpp < 8 || bpp > 32 || (colormodel != kCGColorSpaceModelMonochrome && colormodel != kCGColorSpaceModelRGB))
    {
        // This loader does not support all possible CGImage types, such as paletted images
        CGImageRelease(CGImage);
        return;
    }
    components = bpp >> 3;
    rowBytes = CGImageGetBytesPerRow(CGImage); // CGImage may pad rows
    rowPixels = rowBytes / components;
    imgWide = CGImageGetWidth(CGImage);
    imgHigh = CGImageGetHeight(CGImage);
    // Texture width covers the full padded row; s < 1.0 marks where the
    // real pixels end when CGImage padded the rows.
    img->wide = rowPixels;
    img->high = imgHigh;
    img->s = (float)imgWide / rowPixels;
    img->t = 1.0;

    // Choose OpenGL format
    switch (bpp)
    {
        default:
            rt_assert(0 && "Unknown CGImage bpp");
            // Falls through to the 32bpp path (asserts are presumably
            // compiled out in release builds).
        case 32:
        {
            internal = GL_RGBA;
            switch (info & kCGBitmapAlphaInfoMask)
            {
                case kCGImageAlphaPremultipliedFirst:
                case kCGImageAlphaFirst:
                case kCGImageAlphaNoneSkipFirst:
                    format = GL_BGRA;
                    break;
                default:
                    format = GL_RGBA;
            }
            break;
        }
        case 24:
            internal = format = GL_RGB;
            break;
        case 16:
            internal = format = GL_LUMINANCE_ALPHA;
            break;
        case 8:
            internal = format = GL_LUMINANCE;
            break;
    }

    // Get a pointer to the uncompressed image data.
    //
    // This allows access to the original (possibly unpremultiplied) data, but any manipulation
    // (such as scaling) has to be done manually. Contrast this with drawing the image
    // into a CGBitmapContext, which allows scaling, but always forces premultiplication.
    CFDataRef data = CGDataProviderCopyData(CGImageGetDataProvider(CGImage));
    rt_assert(data);
    pixels = (GLubyte *)CFDataGetBytePtr(data);
    rt_assert(pixels);

    // If the CGImage component layout isn't compatible with OpenGL, fix it.
    // On the device, CGImage will generally return BGRA or RGBA.
    // On the simulator, CGImage may return ARGB, depending on the file format.
    if (format == GL_BGRA)
    {
        uint32_t *p = (uint32_t *)pixels;
        int i, num = img->wide * img->high;
        if ((info & kCGBitmapByteOrderMask) != kCGBitmapByteOrder32Host)
        {
            // Convert from ARGB to BGRA (full byte reversal of each texel)
            for (i = 0; i < num; i++)
                p[i] = (p[i] << 24) | ((p[i] & 0xFF00) << 8) | ((p[i] >> 8) & 0xFF00) | (p[i] >> 24);
        }
        // All current iPhoneOS devices support BGRA via an extension;
        // fall back to swizzling into RGBA only when it is missing.
        if (!renderer->extension[IMG_texture_format_BGRA8888])
        {
            format = GL_RGBA;
            // Convert from BGRA to RGBA (swap R and B, keep G and A)
            for (i = 0; i < num; i++)
#if __LITTLE_ENDIAN__
                p[i] = ((p[i] >> 16) & 0xFF) | (p[i] & 0xFF00FF00) | ((p[i] & 0xFF) << 16);
#else
                p[i] = ((p[i] & 0xFF00) << 16) | (p[i] & 0xFF00FF) | ((p[i] >> 16) & 0xFF00);
#endif
        }
    }

    // Determine if we need to pad this image to a power of two.
    // There are multiple ways to deal with NPOT images on renderers that only support POT:
    // 1) scale down the image to POT size. Loses quality.
    // 2) pad up the image to POT size. Wastes memory.
    // 3) slice the image into multiple POT textures. Requires more rendering logic.
    //
    // We are only dealing with a single image here, and pick 2) for simplicity.
    //
    // If you prefer 1), you can use CoreGraphics to scale the image into a CGBitmapContext.
    POTWide = nextPOT(img->wide);
    POTHigh = nextPOT(img->high);
    if (!renderer->extension[APPLE_texture_2D_limited_npot] && (img->wide != POTWide || img->high != POTHigh))
    {
        GLuint dstBytes = POTWide * components;
        // BUG FIX: assign to the outer `temp` instead of declaring a new
        // local that shadows it. With the shadowing declaration, the
        // `if (temp) free(temp);` cleanup below freed NULL and this buffer
        // leaked on every NPOT image.
        temp = (GLubyte *)malloc(dstBytes * POTHigh);
        for (y = 0; y < img->high; y++)
            memcpy(&temp[y*dstBytes], &pixels[y*rowBytes], rowBytes);
        img->s *= (float)img->wide/POTWide;
        img->t *= (float)img->high/POTHigh;
        img->wide = POTWide;
        img->high = POTHigh;
        pixels = temp;
        rowBytes = dstBytes;
    }

    // For filters that sample texel neighborhoods (like blur), we must replicate
    // the edge texels of the original input, to simulate CLAMP_TO_EDGE.
    {
        GLuint replicatew = MIN(MAX_FILTER_RADIUS, img->wide-imgWide);
        GLuint replicateh = MIN(MAX_FILTER_RADIUS, img->high-imgHigh);
        GLuint imgRow = imgWide * components;
        // Replicate the rightmost real column into the padding columns...
        for (y = 0; y < imgHigh; y++)
            for (x = 0; x < replicatew; x++)
                memcpy(&pixels[y*rowBytes+imgRow+x*components], &pixels[y*rowBytes+imgRow-components], components);
        // ...and the bottom real row (including its replicated columns) downward.
        for (y = imgHigh; y < imgHigh+replicateh; y++)
            memcpy(&pixels[y*rowBytes], &pixels[(imgHigh-1)*rowBytes], imgRow+replicatew*components);
    }

    if (img->wide <= renderer->maxTextureSize && img->high <= renderer->maxTextureSize)
    {
        glGenTextures(1, &texID);
        glBindTexture(GL_TEXTURE_2D, texID);
        // Set filtering parameters appropriate for this application (image processing on screen-aligned quads.)
        // Depending on your needs, you may prefer linear filtering, or mipmap generation.
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        glTexImage2D(GL_TEXTURE_2D, 0, internal, img->wide, img->high, 0, format, GL_UNSIGNED_BYTE, pixels);
    }

    // `temp` is only non-NULL when the POT-padding path allocated it; when it
    // is set, `pixels` points into it rather than into `data`, so both frees
    // below are safe.
    if (temp) free(temp);
    CFRelease(data);
    CGImageRelease(CGImage);
    img->texID = texID;
}