Android MediaCodec-如何同步混合来自两个解码器MediaCodec对象的音频样本?

Android MediaCodec - 如何同步混合来自两个解码器 MediaCodec 对象的音频样本？（标签：android, audio, android-mediacodec）

我当前的项目要求我将视频的音频轨迹与音频文件混合,我已通过以下代码实现了这一点:

// Drain step for the internal decoder (the video's own audio track).
// The unconditional break at the bottom means this "while" runs at most once
// per outer pass; the loop form only exists so the status checks can break out.
// A new buffer is dequeued only when the previous one has been consumed
// (pending index == -1) and the encoder output format is known / muxing started.
while (mCopyAudio && !audioInternalDecoderDone && pendingInternalAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) {
    int decoderOutputBufferIndex = this.internalAudioDecoder.dequeueOutputBuffer(audioInternalDecoderOutputBufferInfo, TIMEOUT_USEC);
    if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
        // No decoded output available within TIMEOUT_USEC; retry on a later pass.
        break;
    }
    if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
        // Output buffer array was invalidated; re-fetch it (pre-API-21 buffer API).
        audioInternalDecoderOutputBuffers = this.internalAudioDecoder.getOutputBuffers();
        break;
    }
    if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
        // Capture the decoder's actual PCM output format (channels / sample rate).
        decoderOutputAudioFormat = this.internalAudioDecoder.getOutputFormat();
        iDecoderOutputChannelNum = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
        iDecoderOutputAudioSampleRate = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
        break;
    }
    if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {           
        // Codec-config buffers carry no audio payload; release without rendering.
        //Not in indent because I couldn't fit it in the editor
      this.internalAudioDecoder.releaseOutputBuffer(decoderOutputBufferIndex,
            false);
        break;
    }
    // A real PCM buffer: remember its index so the mixing loop can consume it.
    pendingInternalAudioDecoderOutputBufferIndex = decoderOutputBufferIndex;
    audioDecodedFrameCount++;
    break;
}
// Drain step for the external decoder (the standalone audio file), mirroring
// the internal-decoder drain above. The trailing break makes it single-shot:
// at most one output buffer is claimed per outer pass, and only while the
// previously claimed one has already been consumed (pending index == -1).
while (mCopyAudio && !audioExternalDecoderDone && pendingExternalAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) {
    int decoderOutputBufferIndex = this.externalAudioDecoder.dequeueOutputBuffer(audioExternalDecoderOutputBufferInfo, TIMEOUT_USEC);
    if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
        // Nothing decoded yet within TIMEOUT_USEC; retry on a later pass.
        break;
    }
    if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
        // Buffer array invalidated; re-fetch (pre-API-21 buffer API).
        audioExternalDecoderOutputBuffers = this.externalAudioDecoder.getOutputBuffers();
        break;
    }
    if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
        // Capture the external decoder's actual PCM format. NOTE(review): this
        // overwrites the shared decoderOutputAudioFormat local also used by the
        // internal-decoder branch; only the e*-prefixed fields persist per-decoder.
        decoderOutputAudioFormat = this.externalAudioDecoder.getOutputFormat();
        eDecoderOutputChannelNum = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
        eDecoderOutputAudioSampleRate = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
        break;
    }
    if ((audioExternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
        // Codec-config buffers carry no audio payload; release without rendering.
        //Not in indent because I couldn't fit it in the editor
     this.externalAudioDecoder.releaseOutputBuffer(decoderOutputBufferIndex,
                false);
        break;
    }
    // A real PCM buffer: stash its index for the mixing loop.
    pendingExternalAudioDecoderOutputBufferIndex = decoderOutputBufferIndex;
    audioDecodedFrameCount++;
    break;
}

// Mixing step: runs only once both decoders have a pending output buffer.
// Copies both PCM buffers out, applies per-source gain, mixes them, and queues
// the result into the encoder. Single-shot loop (unconditional break at end).
while (mCopyAudio && pendingInternalAudioDecoderOutputBufferIndex != -1 && pendingExternalAudioDecoderOutputBufferIndex != -1) {
    int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
    if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
        // Encoder has no free input buffer; keep both pending indices and retry.
        break;
    }
    ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
    // Frame size and timestamp are driven by the INTERNAL track; the external
    // track is assumed (per the note below) to line up with it.
    int size = audioInternalDecoderOutputBufferInfo.size;
    long presentationTime = audioInternalDecoderOutputBufferInfo.presentationTimeUs - musicStartUs;

    if (size >= 0) {
        // duplicate() so position/limit changes don't disturb the codec's buffers.
        ByteBuffer iDecoderOutputBuffer = audioInternalDecoderOutputBuffers[pendingInternalAudioDecoderOutputBufferIndex].duplicate();
        ByteBuffer eDecoderOutputBuffer = audioExternalDecoderOutputBuffers[pendingExternalAudioDecoderOutputBufferIndex].duplicate();
        byte[] initContents = new byte[ audioInternalDecoderOutputBufferInfo.size];
        byte[] eInitContents = new byte[audioExternalDecoderOutputBufferInfo.size];
        iDecoderOutputBuffer.get(initContents, 0, audioInternalDecoderOutputBufferInfo.size);
        eDecoderOutputBuffer.get(eInitContents, 0, audioExternalDecoderOutputBufferInfo.size);

        /*
        The following is my attempt at compensating for different buffer sizes and timestamps - when the internal and external decoder buffer infos' timestampUs don't sync up with each other. This hasn't gone well.

        if(audioExternalDecoderOutputBufferInfo.presentationTimeUs <= totalTime) {
            if (eInitContents.length > initContents.length) {
                SliceAndRemainder sar = sliceArray(eInitContents, initContents.length - remainderForNextBB.length);
                Log.i("slice_and_remainder", sar.slice.length+" "+sar.remainder.length);
                if(remainderForNextBB.length == initContents.length) {
                    eInitContents = remainderForNextBB;
                    remainderForNextBB = new byte[]{};
                } else {
                    eInitContents = concatTwoArrays(remainderForNextBB, sar.slice);
                    remainderForNextBB = sar.remainder;
                }
            }else if(eInitContents.length < initContents.length) {
                eInitContents = minorUpsamplingFrom44kTo48k(eInitContents);
            }
        }

        For brevity's sake, this code is commented out, so assume the ideal condition that the timestamps in both decoders are synced up properly
        */

        // Apply per-source gain, then mix sample-wise (helpers defined elsewhere).
        byte[] alteredIContents = scaleByteArrayByScalar(initContents, internalAudioGain);
        byte[] alteredEContents = scaleByteArrayByScalar(eInitContents, externalAudioGain);
        ByteBuffer endByteBuffer;
        if(audioExternalDecoderOutputBufferInfo.presentationTimeUs <= totalTime) {
            // External audio still has data for this time range: mix both tracks.
            // NOTE(review): mixing over alteredEContents.length assumes the two
            // arrays are the same length - exactly the sync assumption above.
            byte[] res = mixTwoByteArrays(alteredIContents, alteredEContents, alteredEContents.length);
            Log.i("bytebuffer_mixed_len", res.length+"");
            endByteBuffer = ByteBuffer.wrap(res);
        } else {
            // Past the external track's end: pass the internal track through.
            endByteBuffer = ByteBuffer.wrap(alteredIContents);
        }
        // NOTE(review): these two lines adjust the duplicate's window but the
        // duplicate is not read afterwards; they appear to be vestigial.
        iDecoderOutputBuffer.position(audioInternalDecoderOutputBufferInfo.offset);
        iDecoderOutputBuffer.limit(audioInternalDecoderOutputBufferInfo.offset + size);
        encoderInputBuffer.position(0);
        encoderInputBuffer.put(endByteBuffer);
        if((presentationTime < totalTime)) {
            Log.i("presentation_time", presentationTime+" "+totalTime);
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, size, presentationTime, audioInternalDecoderOutputBufferInfo.flags);
        } else {
            // Past the requested duration: submit an empty EOS buffer instead.
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        }
    }
    // Both decoder buffers are consumed; hand them back and clear pendings.
    this.internalAudioDecoder.releaseOutputBuffer(pendingInternalAudioDecoderOutputBufferIndex, false);
    this.externalAudioDecoder.releaseOutputBuffer(pendingExternalAudioDecoderOutputBufferIndex, false);
    pendingInternalAudioDecoderOutputBufferIndex = -1;
    pendingExternalAudioDecoderOutputBufferIndex = -1;
    if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
        // Internal track hit EOS: record the timestamp offset for any following
        // segment (33333 us =~ one 30 fps frame gap) and stop both decoders.
        lastAudioDecoderFinalFrameTimestamp += temporaryAudioDecoderTimestamp + 33333;
        temporaryAudioDecoderTimestamp = 0;
        audioDecoderTimestampOffset = lastAudioDecoderFinalFrameTimestamp;
        audioInternalDecoderDone = true;
        audioExternalDecoderDone = true;
    }
    break;
}
正如上面注释掉的代码所述，这对于时间戳彼此同步的音频轨道/文件非常有效。我的问题是：如果它们不同步怎么办？最近遇到的情况是，音频轨道的时间戳是 26666 的倍数，而音频文件的时间戳是 27000 的倍数。

我曾想过先单独处理音频轨道，再将结果与原始视频轨道合并，但这并不理想：它会显著增加处理时间。因此我更希望实时完成混音，并把上述方案留作最后手段。


有什么方法可以实时执行吗?

只需准备一个 ArrayList&lt;Byte&gt;，把外部解码器样本中的所有字节放进去。然后从该 ArrayList 中取出前 4096 个字节（或内部解码器缓冲区信息的 size 所指示的字节数），与内部解码器的样本混合，随后从该 ArrayList 的索引 0 开始移除已消费的字节数。

// Staging buffer: all decoded external-audio bytes accumulate here so they can
// be consumed in chunks sized to the internal decoder's frames, regardless of
// how the two decoders' buffer sizes / timestamps line up.
private ArrayList<Byte> externalBytesArrayList = new ArrayList<Byte>();

//All the other stuff omitted

// Mixing step: runs once both decoders have a pending output buffer. External
// bytes are appended to externalBytesArrayList, and exactly one internal-sized
// chunk is pulled back out per encoder frame, decoupling the two buffer cadences.
while (mCopyAudio && pendingInternalAudioDecoderOutputBufferIndex != -1 && pendingExternalAudioDecoderOutputBufferIndex != -1) {
    int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
    // BUGFIX: dequeueInputBuffer returns INFO_TRY_AGAIN_LATER (-1) when no input
    // buffer is free within TIMEOUT_USEC; using that as an array index would
    // throw ArrayIndexOutOfBoundsException. Keep the pending indices and retry.
    if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
        break;
    }

    ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
    // Frame size and timestamp are driven by the internal (video) audio track.
    int size = audioInternalDecoderOutputBufferInfo.size;
    long presentationTime = audioInternalDecoderOutputBufferInfo.presentationTimeUs - musicStartUs;
    if (size >= 0) {

        // duplicate() so position/limit changes don't disturb the codec's buffers.
        ByteBuffer iDecoderOutputBuffer = audioInternalDecoderOutputBuffers[pendingInternalAudioDecoderOutputBufferIndex].duplicate();
        ByteBuffer eDecoderOutputBuffer = audioExternalDecoderOutputBuffers[pendingExternalAudioDecoderOutputBufferIndex].duplicate();

        byte[] initContents = new byte[ audioInternalDecoderOutputBufferInfo.size];
        byte[] eInitContents = new byte[audioExternalDecoderOutputBufferInfo.size];

        iDecoderOutputBuffer.get(initContents, 0, audioInternalDecoderOutputBufferInfo.size);
        eDecoderOutputBuffer.get(eInitContents, 0, audioExternalDecoderOutputBufferInfo.size);
        // Stage every external byte; consumption below is chunked independently.
        externalBytesArrayList.addAll(Bytes.asList(eInitContents));

        byte[] eContents;
        //Here: take the first 4096 bytes from the external decoder's sample, save the rest in the ArrayList
        //I need to replace 4096 with audioInternalDecoderOutputBufferInfo.presentationTimeUs - though complications might follow.
        // NOTE(review): 4096 is assumed to equal the internal decoder's frame
        // size -- TODO derive it from audioInternalDecoderOutputBufferInfo.size.
        if (externalBytesArrayList.size() >= 4096) {
            // subList is a live view; clearing it removes the consumed bytes
            // from the backing list (one view reused instead of building it twice).
            List<Byte> subset = externalBytesArrayList.subList(0, 4096);
            eContents = Bytes.toArray(subset);
            subset.clear();
        } else {
            // Not enough external audio staged yet: mix with silence (zeros).
            eContents = new byte[audioInternalDecoderOutputBufferInfo.size];
        }

        // Apply per-source gain, then mix sample-wise (helpers defined elsewhere).
        byte[] alteredIContents = scaleByteArrayByScalar(initContents, internalAudioGain);
        byte[] alteredEContents = scaleByteArrayByScalar(eContents, externalAudioGain);
        ByteBuffer endByteBuffer;
        byte[] res = mixTwoByteArrays(alteredIContents, alteredEContents, alteredEContents.length);
        endByteBuffer = ByteBuffer.wrap(res);

        iDecoderOutputBuffer.position(audioInternalDecoderOutputBufferInfo.offset);
        iDecoderOutputBuffer.limit(audioInternalDecoderOutputBufferInfo.offset + size);

        encoderInputBuffer.position(0);
        encoderInputBuffer.put(endByteBuffer);

        if((presentationTime < totalTime)) {
            Log.i("presentation_time", presentationTime+" "+totalTime);
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, size, presentationTime, audioInternalDecoderOutputBufferInfo.flags);
        } else {
            // Past the requested duration: submit an empty EOS buffer instead.
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        }
    }
    // Both decoder buffers consumed; hand them back and clear the pendings.
    this.internalAudioDecoder.releaseOutputBuffer(pendingInternalAudioDecoderOutputBufferIndex, false);
    this.externalAudioDecoder.releaseOutputBuffer(pendingExternalAudioDecoderOutputBufferIndex, false);
    pendingInternalAudioDecoderOutputBufferIndex = -1;
    pendingExternalAudioDecoderOutputBufferIndex = -1;

    if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
        // Internal track hit EOS: advance the timestamp offset for any following
        // segment (33333 us =~ one 30 fps frame gap) and stop both decoders.
        lastAudioDecoderFinalFrameTimestamp += temporaryAudioDecoderTimestamp + 33333;
        temporaryAudioDecoderTimestamp = 0;
        audioDecoderTimestampOffset = lastAudioDecoderFinalFrameTimestamp;
        audioInternalDecoderDone = true;
        audioExternalDecoderDone = true;
    }
    break;
}
private ArrayList<Byte> externalBytesArrayList = new ArrayList<Byte>();
//其他的都省略了
while (mCopyAudio && pendingInternalAudioDecoderOutputBufferIndex != -1 && pendingExternalAudioDecoderOutputBufferIndex != -1) {
    int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
    ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
    int size = audioInternalDecoderOutputBufferInfo.size;
    long presentationTime = audioInternalDecoderOutputBufferInfo.presentationTimeUs - musicStartUs;
    if (size >= 0) {
        ByteBuffer iDecoderOutputBuffer = audioInternalDecoderOutputBuffers[pendingInternalAudioDecoderOutputBufferIndex].duplicate();
        ByteBuffer eDecoderOutputBuffer = audioExternalDecoderOutputBuffers[pendingExternalAudioDecoderOutputBufferIndex].duplicate();
        byte[] initContents = new byte[audioInternalDecoderOutputBufferInfo.size];
        byte[] eInitContents = new byte[audioExternalDecoderOutputBufferInfo.size];
        iDecoderOutputBuffer.get(initContents, 0, audioInternalDecoderOutputBufferInfo.size);
        eDecoderOutputBuffer.get(eInitContents, 0, audioExternalDecoderOutputBufferInfo.size);
        externalBytesArrayList.addAll(Bytes.asList(eInitContents));
        byte[] eContents;
        //这里：从外部解码器的样本中取前 4096 个字节，其余的保存在 ArrayList 中
        //我需要用 audioInternalDecoderOutputBufferInfo.size 替换 4096，尽管可能会出现并发症。
        if (!(4096 > externalBytesArrayList.size())) {
            List<Byte> subset = externalBytesArrayList.subList(0, 4096);
            eContents = Bytes.toArray(subset);
            externalBytesArrayList.subList(0, 4096).clear();
        } else {
            eContents = new byte[audioInternalDecoderOutputBufferInfo.size];
        }
        byte[] alteredIContents = scaleByteArrayByScalar(initContents, internalAudioGain);
        byte[] alteredEContents = scaleByteArrayByScalar(eContents, externalAudioGain);
        ByteBuffer endByteBuffer;
        byte[] res = mixTwoByteArrays(alteredIContents, alteredEContents, alteredEContents.length);
        endByteBuffer = ByteBuffer.wrap(res);
        iDecoderOutputBuffer.position(audioInternalDecoderOutputBufferInfo.offset);
        iDecoderOutputBuffer.limit(audioInternalDecoderOutputBufferInfo.offset + size);
        encoderInputBuffer.position(0);
        encoderInputBuffer.put(endByteBuffer);
        if ((presentationTime < totalTime)) {
            Log.i("presentation_time", presentationTime + " " + totalTime);
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, size, presentationTime, audioInternalDecoderOutputBufferInfo.flags);
        } else {
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        }
    }
    this.internalAudioDecoder.releaseOutputBuffer(pendingInternalAudioDecoderOutputBufferIndex, false);
    this.externalAudioDecoder.releaseOutputBuffer(pendingExternalAudioDecoderOutputBufferIndex, false);
    pendingInternalAudioDecoderOutputBufferIndex = -1;
    pendingExternalAudioDecoderOutputBufferIndex = -1;
    if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
        lastAudioDecoderFinalFrameTimestamp += temporaryAudioDecoderTimestamp + 33333;
        temporaryAudioDecoderTimestamp = 0;
        audioDecoderTimestampOffset = lastAudioDecoderFinalFrameTimestamp;
        audioInternalDecoderDone = true;
        audioExternalDecoderDone = true;
    }
    break;
}

您能给出混音两种音频的完整源代码吗?
// Staging buffer for decoded external-audio bytes, consumed in chunks sized to
// the internal decoder's frames so the two decoders' cadences can differ.
private ArrayList<Byte> externalBytesArrayList = new ArrayList<Byte>();

//All the other stuff omitted

// Mixing step: runs once both decoders have a pending output buffer. External
// bytes accumulate in externalBytesArrayList; one internal-sized chunk is
// consumed per encoder frame.
while (mCopyAudio && pendingInternalAudioDecoderOutputBufferIndex != -1 && pendingExternalAudioDecoderOutputBufferIndex != -1) {
    int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
    // BUGFIX: dequeueInputBuffer returns INFO_TRY_AGAIN_LATER (-1) when no input
    // buffer frees up within TIMEOUT_USEC; indexing audioEncoderInputBuffers
    // with it would throw ArrayIndexOutOfBoundsException.
    if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
        break;
    }

    ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
    // Frame size and timestamp are driven by the internal (video) audio track.
    int size = audioInternalDecoderOutputBufferInfo.size;
    long presentationTime = audioInternalDecoderOutputBufferInfo.presentationTimeUs - musicStartUs;
    if (size >= 0) {

        // duplicate() so position/limit changes don't disturb the codec's buffers.
        ByteBuffer iDecoderOutputBuffer = audioInternalDecoderOutputBuffers[pendingInternalAudioDecoderOutputBufferIndex].duplicate();
        ByteBuffer eDecoderOutputBuffer = audioExternalDecoderOutputBuffers[pendingExternalAudioDecoderOutputBufferIndex].duplicate();

        byte[] initContents = new byte[ audioInternalDecoderOutputBufferInfo.size];
        byte[] eInitContents = new byte[audioExternalDecoderOutputBufferInfo.size];

        iDecoderOutputBuffer.get(initContents, 0, audioInternalDecoderOutputBufferInfo.size);
        eDecoderOutputBuffer.get(eInitContents, 0, audioExternalDecoderOutputBufferInfo.size);
        // Stage every external byte; consumption below is chunked independently.
        externalBytesArrayList.addAll(Bytes.asList(eInitContents));

        byte[] eContents;
        //Here: take the first 4096 bytes from the external decoder's sample, save the rest in the ArrayList
        //I need to replace 4096 with audioInternalDecoderOutputBufferInfo.presentationTimeUs - though complications might follow.
        // NOTE(review): 4096 is assumed to equal the internal decoder's frame
        // size -- TODO derive it from audioInternalDecoderOutputBufferInfo.size.
        if (externalBytesArrayList.size() >= 4096) {
            // subList is a live view; clearing it removes the consumed bytes
            // from the backing list (one view reused instead of building it twice).
            List<Byte> subset = externalBytesArrayList.subList(0, 4096);
            eContents = Bytes.toArray(subset);
            subset.clear();
        } else {
            // Not enough external audio staged yet: mix with silence (zeros).
            eContents = new byte[audioInternalDecoderOutputBufferInfo.size];
        }

        // Apply per-source gain, then mix sample-wise (helpers defined elsewhere).
        byte[] alteredIContents = scaleByteArrayByScalar(initContents, internalAudioGain);
        byte[] alteredEContents = scaleByteArrayByScalar(eContents, externalAudioGain);
        ByteBuffer endByteBuffer;
        byte[] res = mixTwoByteArrays(alteredIContents, alteredEContents, alteredEContents.length);
        endByteBuffer = ByteBuffer.wrap(res);

        iDecoderOutputBuffer.position(audioInternalDecoderOutputBufferInfo.offset);
        iDecoderOutputBuffer.limit(audioInternalDecoderOutputBufferInfo.offset + size);

        encoderInputBuffer.position(0);
        encoderInputBuffer.put(endByteBuffer);

        if((presentationTime < totalTime)) {
            Log.i("presentation_time", presentationTime+" "+totalTime);
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, size, presentationTime, audioInternalDecoderOutputBufferInfo.flags);
        } else {
            // Past the requested duration: submit an empty EOS buffer instead.
            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        }
    }
    // Both decoder buffers consumed; hand them back and clear the pendings.
    this.internalAudioDecoder.releaseOutputBuffer(pendingInternalAudioDecoderOutputBufferIndex, false);
    this.externalAudioDecoder.releaseOutputBuffer(pendingExternalAudioDecoderOutputBufferIndex, false);
    pendingInternalAudioDecoderOutputBufferIndex = -1;
    pendingExternalAudioDecoderOutputBufferIndex = -1;

    if ((audioInternalDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
        // Internal track hit EOS: advance the timestamp offset for any following
        // segment (33333 us =~ one 30 fps frame gap) and stop both decoders.
        lastAudioDecoderFinalFrameTimestamp += temporaryAudioDecoderTimestamp + 33333;
        temporaryAudioDecoderTimestamp = 0;
        audioDecoderTimestampOffset = lastAudioDecoderFinalFrameTimestamp;
        audioInternalDecoderDone = true;
        audioExternalDecoderDone = true;
    }
    break;
}