Android 将YUV_420_888转换为JPEG并保存文件结果扭曲图像
我使用了git repo中提供的Android 将YUV_420_888转换为JPEG并保存文件结果扭曲图像,android,image,android-camera,Android,Image,Android Camera,我使用了git repo中提供的ImageUtil类:(注意,实现在camera_preview_imp分支中)来实现帧预览回调。ImageReader设置为预览ImageFormat.YUV_420_888格式的帧,该格式将使用ImageUtil类转换为ImageFormat.JPEG,并将其发送到帧回调。演示应用程序每隔50帧将回调中的一帧保存到一个文件中。所有保存的帧图像都会失真,如下所示: 如果我已将ImageReader更改为使用ImageFormat.JPEG,请在Came
ImageUtil
类:(注意,实现在camera_preview_imp
分支中)来实现帧预览回调。ImageReader
设置为预览ImageFormat.YUV_420_888
格式的帧,该格式将使用ImageUtil
类转换为ImageFormat.JPEG
,并将其发送到帧回调。演示应用程序每隔50帧将回调中的一帧保存到一个文件中。所有保存的帧图像都会失真,如下所示:
如果我已将ImageReader
更改为使用ImageFormat.JPEG
,请在Camera2
中执行以下更改:
// Preview ImageReader configured to receive JPEG frames directly from the camera.
// NOTE(review): per the surrounding discussion, JPEG output avoids the distortion
// but hardware JPEG encoding is slow, so the preview frame rate drops noticeably.
mPreviewImageReader = ImageReader.newInstance(previewSize.getWidth(),
previewSize.getHeight(), ImageFormat.JPEG, /* maxImages */ 2);
// The capture session targets both the display surface and the reader's surface.
mCamera.createCaptureSession(Arrays.asList(surface, mPreviewImageReader.getSurface()),
mSessionCallback, null);
图像正常,没有任何失真,但帧速率显著下降,视图开始滞后。因此,我认为
ImageUtil
类没有正确转换。更新ImageUtil:
/**
 * Helpers for repacking camera YUV_420_888 plane buffers into NV12/NV21 or
 * I420-style layouts and for JPEG-encoding NV21 frames.
 *
 * NOTE(review): these helpers copy the plane buffers verbatim and therefore
 * assume tightly packed planes (no row/pixel stride padding). On devices whose
 * planes carry padding this produces exactly the kind of distorted output
 * described in the question -- a stride-aware conversion is needed there.
 */
public final class ImageUtil {

    /** JPEG-encodes a raw NV21 frame via {@code YuvImage}. */
    public static byte[] NV21toJPEG(byte[] nv21, int width, int height, int quality) {
        final ByteArrayOutputStream jpegBytes = new ByteArrayOutputStream();
        final YuvImage frame = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
        frame.compressToJpeg(new Rect(0, 0, width, height), quality, jpegBytes);
        return jpegBytes.toByteArray();
    }

    /**
     * Concatenates the three planes into one semi-planar buffer.
     *
     * @param nv12 true = NV12 ordering, false = NV21 ordering
     */
    public static byte[] YUV_420_888toNV(ByteBuffer yBuffer, ByteBuffer uBuffer, ByteBuffer vBuffer, boolean nv12) {
        final int yLen = yBuffer.remaining();
        final int uLen = uBuffer.remaining();
        final int vLen = vBuffer.remaining();
        final byte[] packed = new byte[yLen + uLen + vLen];
        yBuffer.get(packed, 0, yLen);
        if (nv12) { // U and V are swapped
            vBuffer.get(packed, yLen, vLen);
            uBuffer.get(packed, yLen + vLen, uLen);
        } else {
            uBuffer.get(packed, yLen, uLen);
            vBuffer.get(packed, yLen + uLen, vLen);
        }
        return packed;
    }

    /**
     * Builds a semi-planar frame and, depending on {@code deInterleaveUV},
     * either de-interleaves the chroma bytes into separate U/V runs or swaps
     * each interleaved chroma pair in place.
     */
    public static byte[] YUV_420_888toI420SemiPlanar(ByteBuffer yBuffer, ByteBuffer uBuffer, ByteBuffer vBuffer,
    int width, int height, boolean deInterleaveUV) {
        final byte[] data = YUV_420_888toNV(yBuffer, uBuffer, vBuffer, deInterleaveUV);
        final int lumaSize = width * height;
        if (deInterleaveUV) {
            // Split the interleaved chroma into a planar U run followed by a V run.
            final byte[] scratch = new byte[3 * width * height / 2];
            for (int i = 0; i < lumaSize / 4; i++) {
                scratch[i] = data[lumaSize + 2 * i + 1];
                scratch[lumaSize / 4 + i] = data[lumaSize + 2 * i];
            }
            System.arraycopy(scratch, 0, data, lumaSize, lumaSize / 2);
        } else {
            // Swap every interleaved chroma pair in place.
            for (int i = lumaSize; i < data.length; i += 2) {
                final byte tmp = data[i];
                data[i] = data[i + 1];
                data[i + 1] = tmp;
            }
        }
        return data;
    }
}
!!! 不要忘记在处理后关闭图像 !!!
// Release the Image back to the ImageReader so new frames can be delivered.
image.close();
@volodymyr-kulyk提供的解决方案没有考虑图像中平面的行间距。下面的代码实现了这个技巧(
image
属于android.media.Image
类型):
以及实现:
/**
 * JPEG-encodes a raw NV21 frame at the given quality and returns the
 * encoded bytes.
 */
private static byte[] NV21toJPEG(byte[] nv21, int width, int height, int quality) {
    final ByteArrayOutputStream jpegBytes = new ByteArrayOutputStream();
    new YuvImage(nv21, ImageFormat.NV21, width, height, null)
            .compressToJpeg(new Rect(0, 0, width, height), quality, jpegBytes);
    return jpegBytes.toByteArray();
}
/**
 * Repacks a YUV_420_888 {@code android.media.Image} into a single NV21 byte
 * array (full Y plane followed by interleaved V/U), honoring each plane's
 * rowStride and pixelStride as well as the image's crop rect. This is what
 * the simpler buffer-concatenation approach misses and why it distorts.
 */
private static byte[] YUV420toNV21(Image image) {
    Rect crop = image.getCropRect();
    int format = image.getFormat();
    int width = crop.width();
    int height = crop.height();
    Image.Plane[] planes = image.getPlanes();
    // 12 bits per pixel for YUV_420_888 -> width*height*3/2 output bytes.
    byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
    // Scratch buffer for one source row.
    // NOTE(review): sized from plane 0's rowStride; assumes the chroma
    // rowStrides never exceed the Y rowStride -- TODO confirm on target devices.
    byte[] rowData = new byte[planes[0].getRowStride()];
    int channelOffset = 0;
    int outputStride = 1;
    for (int i = 0; i < planes.length; i++) {
        switch (i) {
            case 0:
                // Y plane: packed at the start of the output, one byte per pixel.
                channelOffset = 0;
                outputStride = 1;
                break;
            case 1:
                // U plane: odd chroma positions (NV21 interleaves as V,U,V,U...).
                channelOffset = width * height + 1;
                outputStride = 2;
                break;
            case 2:
                // V plane: even chroma positions.
                channelOffset = width * height;
                outputStride = 2;
                break;
        }
        ByteBuffer buffer = planes[i].getBuffer();
        int rowStride = planes[i].getRowStride();
        int pixelStride = planes[i].getPixelStride();
        // Chroma planes are subsampled 2x in both dimensions.
        int shift = (i == 0) ? 0 : 1;
        int w = width >> shift;
        int h = height >> shift;
        // Seek to the top-left corner of the crop region within this plane.
        buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
        for (int row = 0; row < h; row++) {
            int length;
            if (pixelStride == 1 && outputStride == 1) {
                // Fast path: source row is contiguous and maps 1:1 -> bulk copy.
                length = w;
                buffer.get(data, channelOffset, length);
                channelOffset += length;
            } else {
                // Strided path: read the row into scratch, then pick every
                // pixelStride-th byte and write it at outputStride spacing.
                length = (w - 1) * pixelStride + 1;
                buffer.get(rowData, 0, length);
                for (int col = 0; col < w; col++) {
                    data[channelOffset] = rowData[col * pixelStride];
                    channelOffset += outputStride;
                }
            }
            // Skip the row padding, except after the last row (the underlying
            // buffer may end before a full rowStride of trailing bytes).
            if (row < h - 1) {
                buffer.position(buffer.position() + rowStride - length);
            }
        }
    }
    return data;
}
(以下代码段在原文排版/机器翻译过程中被损坏并截断,现按本文其他处的完整实现恢复:)

private static byte[] NV21toJPEG(byte[] nv21, int width, int height, int quality) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    YuvImage yuv = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
    yuv.compressToJpeg(new Rect(0, 0, width, height), quality, out);
    return out.toByteArray();
}

private static byte[] YUV420toNV21(Image image) {
    Rect crop = image.getCropRect();
    int format = image.getFormat();
    int width = crop.width();
    int height = crop.height();
    Image.Plane[] planes = image.getPlanes();
    byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
    byte[] rowData = new byte[planes[0].getRowStride()];
    int channelOffset = 0;
    int outputStride = 1;
    for (int i = 0; i < planes.length; i++) {
        switch (i) {
            case 0:
                channelOffset = 0;
                outputStride = 1;
                break;
            case 1:
                channelOffset = width * height + 1;
                outputStride = 2;
                break;
            case 2:
                channelOffset = width * height;
                outputStride = 2;
                break;
        }
        ByteBuffer buffer = planes[i].getBuffer();
        int rowStride = planes[i].getRowStride();
        int pixelStride = planes[i].getPixelStride();
        int shift = (i == 0) ? 0 : 1;
        int w = width >> shift;
        int h = height >> shift;
        buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
        for (int row = 0; row < h; row++) {
            int length;
            if (pixelStride == 1 && outputStride == 1) {
                length = w;
                buffer.get(data, channelOffset, length);
                channelOffset += length;
            } else {
                length = (w - 1) * pixelStride + 1;
                buffer.get(rowData, 0, length);
                for (int col = 0; col < w; col++) {
                    data[channelOffset] = rowData[col * pixelStride];
                    channelOffset += outputStride;
                }
            }
            if (row < h - 1) {
                buffer.position(buffer.position() + rowStride - length);
            }
        }
    }
    return data;
}
方法来自以下内容。Camera2 YUV_420_888到Java(Android)中的Jpeg格式:
注意:处理图像旋转问题。
最终图像是否存在失真?图像是否写入文件?我的问题是错误的,经过编辑以消除混淆。在哪里可以看到onImageAvailable(ImageReader reader)
(ImageReader.OnImageAvailableListener)方法?在mOnPreviewAvailableListener
变量中的Camera2
类中。请链接:)我找不到它。聊天室中发布的更新:我花了整整一个下午的时间才看到这篇文章。我希望给你100票!!!!顺便说一句,转换速度有点慢,我的设备每帧接收40毫秒~140毫秒。@SiraLam感谢您的反馈。如果你愿意,你可以就这个问题悬赏,并给它答案;关于处理,是的,你是对的,这是一个缓慢的过程,图像对话本质上是这样的。为了能够更快地处理更多的帧,需要在线程之间进行一定程度的多线程同步,以实现流水线效果。由于帧处理大约是100毫秒,用户不会真正感觉到滞后,如果观看相机SrimaI会考虑:P,用户不会感觉到任何预览的滞后,因为我的预览是使用另一个不同于我使用这些帧的表面。一旦我使用另一个线程来做转换工作,预览仍然是非常顺利的。事实上,我正在使用这些帧进行人脸检测,并在这些人脸上覆盖一些东西。。。遗憾的是,使用Camera 1 API,这同时非常简单和快速:(小米米A1,应用程序正在崩溃,图像阅读器使用JPEG格式,转换为YUV_420_888,然后使用您的方法。超级感谢。这不起作用:(此后格式仍然不是JPEG)。
// Convert the YUV_420_888 Image to NV21, then JPEG-encode it at quality 100.
// NOTE(review): passes image.getWidth()/getHeight(); if the crop rect differs
// from the full image, the crop dimensions would be the safer choice -- confirm.
data = NV21toJPEG(YUV420toNV21(image), image.getWidth(), image.getHeight(), 100);
/**
 * JPEG-encodes a raw NV21 frame at the given quality and returns the
 * encoded bytes. The buffer is expected to hold width*height*3/2 bytes.
 */
private static byte[] NV21toJPEG(byte[] nv21, int width, int height, int quality) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    YuvImage yuv = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
    yuv.compressToJpeg(new Rect(0, 0, width, height), quality, out);
    return out.toByteArray();
}
/**
 * Repacks a YUV_420_888 {@code android.media.Image} into a single NV21 byte
 * array (full Y plane followed by interleaved V/U), honoring each plane's
 * rowStride and pixelStride as well as the image's crop rect.
 */
private static byte[] YUV420toNV21(Image image) {
    Rect crop = image.getCropRect();
    int format = image.getFormat();
    int width = crop.width();
    int height = crop.height();
    Image.Plane[] planes = image.getPlanes();
    // 12 bits per pixel for YUV_420_888 -> width*height*3/2 output bytes.
    byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
    // Scratch buffer for one source row.
    // NOTE(review): sized from plane 0's rowStride; assumes the chroma
    // rowStrides never exceed the Y rowStride -- TODO confirm on target devices.
    byte[] rowData = new byte[planes[0].getRowStride()];
    int channelOffset = 0;
    int outputStride = 1;
    for (int i = 0; i < planes.length; i++) {
        switch (i) {
            case 0:
                // Y plane: packed at the start of the output, one byte per pixel.
                channelOffset = 0;
                outputStride = 1;
                break;
            case 1:
                // U plane: odd chroma positions (NV21 interleaves as V,U,V,U...).
                channelOffset = width * height + 1;
                outputStride = 2;
                break;
            case 2:
                // V plane: even chroma positions.
                channelOffset = width * height;
                outputStride = 2;
                break;
        }
        ByteBuffer buffer = planes[i].getBuffer();
        int rowStride = planes[i].getRowStride();
        int pixelStride = planes[i].getPixelStride();
        // Chroma planes are subsampled 2x in both dimensions.
        int shift = (i == 0) ? 0 : 1;
        int w = width >> shift;
        int h = height >> shift;
        // Seek to the top-left corner of the crop region within this plane.
        buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
        for (int row = 0; row < h; row++) {
            int length;
            if (pixelStride == 1 && outputStride == 1) {
                // Fast path: source row is contiguous and maps 1:1 -> bulk copy.
                length = w;
                buffer.get(data, channelOffset, length);
                channelOffset += length;
            } else {
                // Strided path: read the row into scratch, then pick every
                // pixelStride-th byte and write it at outputStride spacing.
                length = (w - 1) * pixelStride + 1;
                buffer.get(rowData, 0, length);
                for (int col = 0; col < w; col++) {
                    data[channelOffset] = rowData[col * pixelStride];
                    channelOffset += outputStride;
                }
            }
            // Skip the row padding, except after the last row (the underlying
            // buffer may end before a full rowStride of trailing bytes).
            if (row < h - 1) {
                buffer.position(buffer.position() + rowStride - length);
            }
        }
    }
    return data;
}
/**
 * Frame callback: converts the latest YUV_420_888 preview frame to NV21 and
 * hands it to {@code getYUV2jpg(...)} for saving.
 *
 * Fixes relative to the original snippet:
 *  - reads the planes from the locally acquired {@code image} instead of a
 *    stale {@code mImage} field (the acquired frame was never actually used);
 *  - null-checks {@code image} in {@code finally}, since
 *    {@code acquireLatestImage()} may return null and the unconditional
 *    {@code image.close()} then threw a NullPointerException;
 *  - logs the full exception instead of a possibly-null {@code getMessage()}.
 *
 * NOTE(review): this packing ignores row/pixel stride; on devices with padded
 * planes a stride-aware conversion such as YUV420toNV21(image) is required.
 */
@Override
public void onImageAvailable(ImageReader reader) {
    Image image = null;
    try {
        image = reader.acquireLatestImage();
        if (image != null) {
            ByteBuffer yBuffer = image.getPlanes()[0].getBuffer();
            ByteBuffer uBuffer = image.getPlanes()[1].getBuffer();
            ByteBuffer vBuffer = image.getPlanes()[2].getBuffer();
            int ySize = yBuffer.remaining();
            int uSize = uBuffer.remaining();
            int vSize = vBuffer.remaining();
            byte[] nv21 = new byte[ySize + uSize + vSize];
            // U and V are swapped: NV21 stores V before U.
            yBuffer.get(nv21, 0, ySize);
            vBuffer.get(nv21, ySize, vSize);
            uBuffer.get(nv21, ySize + vSize, uSize);
            String savingFilepath = getYUV2jpg(nv21);
        }
    } catch (Exception e) {
        Log.w(TAG, "Failed to process preview frame", e);
    } finally {
        if (image != null) {
            image.close(); // don't forget to close
        }
    }
}
/**
 * Writes {@code data} to "picture.jpeg" under the hard-coded parent directory
 * and returns the file's absolute path, or the I/O error message on failure.
 *
 * NOTE(review): the bytes are written verbatim, so callers must pass
 * JPEG-encoded data (e.g. from NV21toJPEG); writing raw NV21 here produces a
 * file that is not a valid JPEG -- this is the bug discussed in the thread.
 */
public String getYUV2jpg(byte[] data) {
    File imageFile = new File("your parent directory", "picture.jpeg");//no i18n
    // try-with-resources replaces the original's double close() (the stream
    // was closed both in the try body and again in finally) and no longer
    // silently swallows close() failures.
    try (BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(imageFile))) {
        bos.write(data);
        bos.flush();
    } catch (IOException e) {
        return e.getMessage();
    }
    return imageFile.getAbsolutePath();
}