Java：如何更新方法中的字节数组，而无需再次运行它？

（标签：java、android、methods、ffmpeg、byte）

我有一个类（一个 AsyncTask），它进行图像处理，并以大约 200 毫秒的间隔连续生成 yuv 字节。
现在，我将这些 yuv 字节发送到另一个方法，使用 FFmpeg 帧记录器记录它们：
/**
 * Grabs the latest NV21 frame from the image-processing task and feeds it to
 * the FFmpeg frame recorder. Frames are dropped (and the recording clock
 * reset) while audio capture is not running.
 */
public void recordYuvData() {
    // Latest NV21 frame produced by the image-processing task.
    byte[] frame = getNV21();
    System.out.println(frame.length + " returned yuv bytes ");

    // Audio capture must be live before video frames are accepted;
    // otherwise just re-arm the start-of-recording clock and bail out.
    boolean audioLive = audioRecord != null
            && audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING;
    if (!audioLive) {
        startTime = System.currentTimeMillis();
        return;
    }

    if (RECORD_LENGTH > 0) {
        // Circular-buffer mode: advance to the next slot and stamp it
        // (elapsed milliseconds * 1000, i.e. microseconds since start).
        int slot = imagesIndex++ % images.length;
        yuvimage = images[slot];
        timestamps[slot] = 1000 * (System.currentTimeMillis() - startTime);
    }

    /* get video data */
    if (yuvimage != null && recording) {
        // Copy the frame bytes into the recorder's image buffer from offset 0.
        ((ByteBuffer) yuvimage.image[0].position(0)).put(frame);

        // Direct-record mode only; in circular-buffer mode the frames are
        // presumably flushed elsewhere -- confirm against the stop path.
        if (RECORD_LENGTH <= 0) {
            try {
                // Keep the recorder timestamp monotonically increasing
                // (elapsed ms * 1000 = microseconds since start).
                long micros = 1000 * (System.currentTimeMillis() - startTime);
                if (micros > recorder.getTimestamp()) {
                    recorder.setTimestamp(micros);
                }
                recorder.record(yuvimage);
            } catch (FFmpegFrameRecorder.Exception e) {
                e.printStackTrace();
            }
        }
    }
}
getNV21()（下面这段是上方代码的损坏副本，已按原文恢复）：

byte[] getNV21(Bitmap bitmap) {
    int inputWidth = 1024;
    int inputHeight = 640;
    int[] argb = new int[inputWidth * inputHeight];
    bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    System.out.println(argb.length + "@getpixels ");
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    System.out.println(yuv420sp.length + " @encoding " + frameSize);
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) { /* …其余代码与下文英文版本相同… */ }
}

评论：请张贴您的按钮代码。另外，请发布您的
getNV21
方法。——为什么调用 getNV21() 时不带任何参数？它需要一个位图参数。——很抱歉，该参数存在于代码的不同部分，它类似于 byte[] getNV21() { Bitmap bitmap = Bitmapdata()… }
// Button handler that kicks off recording.
record.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        // Bug fix: the method is declared as recordYuvData (capital D);
        // Java is case-sensitive, so recordYuvdata() would not compile.
        recordYuvData();
        startRecording();
    }
});
/**
 * Converts the given bitmap's ARGB pixels into an NV21 (YUV420SP) byte array.
 *
 * @param bitmap source image; its own width/height are used for the conversion
 * @return NV21 bytes, width * height * 3 / 2 long
 */
byte[] getNV21(Bitmap bitmap) {
    // Generalization: read the bitmap's actual dimensions instead of the
    // hard-coded 1024x640 -- getPixels throws (or reads garbage) for any
    // bitmap of a different size. Behavior is unchanged for 1024x640 input.
    int inputWidth = bitmap.getWidth();
    int inputHeight = bitmap.getHeight();
    int[] argb = new int[inputWidth * inputHeight];
    bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    System.out.println(argb.length + "@getpixels ");
    // NV21 needs 1.5 bytes per pixel: full-res Y plane + quarter-res VU plane.
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    return yuv;
}
/**
 * Converts a packed ARGB pixel array to NV21 (YUV420SP): a full-resolution
 * Y plane followed by a 2x2-subsampled interleaved V/U plane.
 *
 * @param yuv420sp output buffer; must hold at least width * height * 3 / 2 bytes
 * @param argb     input pixels, one packed 0xAARRGGBB int per pixel, row-major
 * @param width    frame width in pixels
 * @param height   frame height in pixels
 */
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    System.out.println(yuv420sp.length + " @encoding " + frameSize);
    int R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            // Alpha is ignored (the original extracted it into an unused local).
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = argb[index] & 0xff;
            // Well-known integer RGB -> YUV approximation.
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21: Y plane first, then one V and one U byte per 2x2 pixel
            // block (every other pixel on every other scanline).
            yuv420sp[yIndex++] = (byte) (Y < 0 ? 0 : (Y > 255 ? 255 : Y));
            // Bug fix: subsample on the column index i, not the running pixel
            // index -- "index % 2" only matches "i % 2" when width is even,
            // so odd widths previously produced a misaligned VU plane.
            if (j % 2 == 0 && i % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) (V < 0 ? 0 : (V > 255 ? 255 : V));
                yuv420sp[uvIndex++] = (byte) (U < 0 ? 0 : (U > 255 ? 255 : U));
            }
            index++;
        }
    }
}