Android NDK音频回调
我正在开发一款实时合成的Android应用程序。我用NDK生成波形,用Java来做所有的UI。我有以下代码:
// Worker thread that blocks inside the native audio loop; created once in onCreate().
private Thread audioThread;

/**
 * Activity entry point: initializes the UI, then spawns a dedicated
 * maximum-priority thread that calls into the native audio loop
 * (JNIWrapper.runProcess()), keeping audio work off the UI thread.
 *
 * @param savedInstanceState previously saved state, forwarded to the framework
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
    // BUG FIX: the Activity contract requires calling through to super.onCreate();
    // omitting it throws SuperNotCalledException at runtime.
    super.onCreate(savedInstanceState);
    // UI Initializations here
    // Audio Thread creation:
    if (audioThread == null) {
        audioThread = new Thread() {
            @Override
            public void run() {
                // NOTE(review): Java thread priority maps poorly to Android
                // scheduling; android.os.Process.setThreadPriority(
                // Process.THREAD_PRIORITY_URGENT_AUDIO) is the recommended
                // call for audio threads — confirm and switch if possible.
                setPriority(Thread.MAX_PRIORITY);
                JNIWrapper.runProcess(); // blocks for the lifetime of the audio loop
            }
        };
        audioThread.start();
    }
}
我的C++文件:
void Java_com_rfoo_runProcess() {
OPENSL_STREAM *p = android_OpenAudioDevice(SAMPLE_RATE, 0, 2, FRAME_SIZE);
double outBuffer[FRAME_SIZE];
while (true) {
// Audio Processing code happens HERE
// Write to output buffer here
android_AudioOut(p, outBuffer, FRAME_SIZE);
}
android_CloseAudioDevice(p);
}
如果我在runProcess代码中没有做大量的信号处理工作,那么这一切都没问题。但因为有很多工作要做,所以当我尝试更改信号处理代码的参数(例如频率、ADSR包络、滤波器截止频率等)时,我的UI延迟非常高,并且会导致爆音(点击声)。
有哪些方法可以减少这种延迟?在iOS和PortAudio中,当时间间隔/缓冲区被填满时,通常会调用音频回调。我尝试搜索Android中存在的类似音频回调,但找不到。我是否应该编写自己的计时器来调用处理代码
谢谢 是的,所以我的回调设置完全错误。。。事实上,我甚至没有设置回调 为了纠正这一点,我在网上遵循了一些技巧,并创建了一个处理回调:
/*
 * Processing-callback type. BUG FIX: widened to match how playerCallback
 * actually invokes it — the original 4-parameter typedef did not match the
 * 7-argument call site (context, sample rate, frames, input side, output side).
 */
typedef void (*opensl_callback)(void *context, int sample_rate,
                                int buffer_frames,
                                int input_channels, const short *input_buffer,
                                int output_channels, short *output_buffer);

/* Callback installed on the stream (assigned in runProcess). */
static opensl_callback myAudioCallback;

/*
 * Application processing callback: synthesizes buffer_frames frames of
 * output_channels interleaved samples into output_buffer.
 */
static void audioCallback(void *context, int sample_rate, int buffer_frames,
                          int input_channels, const short *input_buffer,
                          int output_channels, short *output_buffer) {
    /* Get my object's data (per-stream synth state). */
    AudioData *data = (AudioData *) context;
    (void) data; (void) sample_rate; (void) input_channels; (void) input_buffer;
    /* Process here! BUG FIX: the original assigned one sample through the
     * undeclared variables i/final_sample; fill every interleaved sample. */
    for (int i = 0; i < buffer_frames * output_channels; i++) {
        short final_sample = 0; /* replace 0 with the synthesized sample */
        output_buffer[i] = final_sample;
    }
}
/*
 * OpenSL ES buffer-queue callback: fires each time the player has consumed a
 * buffer and needs the next one. All synthesis is driven from here — this is
 * the callback model that replaces the blocking while (true) loop.
 *
 * BUG FIX: the original had a stray closing brace after the callback
 * invocation, cutting the function in half and leaving the Enqueue call and
 * the ring-index advance outside any function.
 */
static void playerCallback(SLAndroidSimpleBufferQueueItf bq, void *context) {
    OPENSL_STREAM *p = (OPENSL_STREAM *) context;
    /* Select the next slot in the ring of output buffers.
     * NOTE(review): the modulo uses numFrames, but opensl_openDevice
     * allocates outputBufferFrames (= OUTPUT_BUFFERS * numFrames) frames —
     * confirm which value correctly indexes the ring. */
    short *currentOutputBuffer = p->outputBuffer +
        (p->outputIndex % p->numFrames) * p->outputChannels;
    /* Start from silence so an underrun produces silence, not stale data. */
    memset(currentOutputBuffer, 0,
           p->callbackBufferFrames * p->outputChannels * sizeof(short));
    /* Let the application fill the buffer.
     * NOTE(review): opensl_openDevice stores the user pointer in p->data,
     * but this call passes p->context — confirm they are the same field or
     * unify the name. */
    p->callback(p->context, p->sampleRate, p->callbackBufferFrames,
                p->inputChannels, p->dummyBuffer,
                p->outputChannels, currentOutputBuffer);
    /* Hand the filled buffer back to OpenSL and advance the ring index. */
    (*bq)->Enqueue(bq, currentOutputBuffer,
                   p->callbackBufferFrames * p->outputChannels * sizeof(short));
    p->outputIndex = nextIndex(p->outputIndex, p->callbackBufferFrames);
}
我如何声明/初始化OpenSL流:
jboolean Java_com_rfoo_AudioProcessor_runProcess(JNIEnv *, jobject,
int srate, int numFrames) {
myAudioCallback = audioCallback;
OPENSL_Stream *p = opensl_openDevice(srate, 2, numFrames, myAudioCallback, audioData);
// Check if successful initialization
if (!p) return JNI_FALSE;
// Start our process:
opensl_startProcess(p);
return JNI_TRUE;
}
基本上,opensl_openDevice() 和 opensl_startProcess() 所做的是:
/*
 * Allocates and configures an OPENSL_STREAM for output-only playback.
 *
 * sampleRate  sample rate in Hz (converted to the OpenSL millihertz code)
 * outChans    number of interleaved output channels (must be > 0)
 * numFrames   frames per callback buffer
 * cb          processing callback (required)
 * data        opaque user pointer handed back to cb
 *
 * Returns NULL on any failure; partially constructed streams are released
 * via opensl_close(). On success the caller owns the returned stream.
 */
OPENSL_STREAM *opensl_openDevice(int sampleRate, int outChans, int numFrames,
                                 opensl_callback cb, void *data) {
    if (!cb) {
        return NULL;
    }
    if (outChans == 0) {
        return NULL;
    }
    /* BUG FIX: the result was stored in an unsigned SLuint32, so the
     * "srmillihz < 0" failure check could never fire; keep it signed until
     * after validation. */
    SLint32 srmillihz = convertSampleRate(sampleRate);
    if (srmillihz < 0) {
        return NULL;
    }
    OPENSL_STREAM *p = (OPENSL_STREAM *) calloc(1, sizeof(OPENSL_STREAM));
    if (!p) {
        return NULL;
    }
    p->callback = cb;
    p->data = data;
    p->isRunning = 0;
    p->outputChannels = outChans;
    p->sampleRate = sampleRate;
    /* Underrun watchdog threshold: 3/4 of one buffer period, in ms. */
    p->thresholdMillis = 750.0 * numFrames / sampleRate;
    p->outputBuffer = NULL;
    p->dummyBuffer = NULL;
    p->numFrames = numFrames;
    p->outputBufferFrames = OUTPUT_BUFFERS * numFrames;
    if (openSLCreateEngine(p) != SL_RESULT_SUCCESS) {
        opensl_close(p);
        return NULL;
    }
    /* outChans is always non-zero here (checked above); branch kept for
     * symmetry with the input-channel setup of the full library. */
    if (outChans) {
        int outBufSize = p->outputBufferFrames * outChans;
        if (!(openSLPlayOpen(p, (SLuint32) srmillihz) == SL_RESULT_SUCCESS &&
              (p->outputBuffer = (short *) calloc(outBufSize, sizeof(short))))) {
            opensl_close(p);
            return NULL;
        }
    }
    /* BUG FIX: the log referenced the undeclared identifiers inChans and
     * callbackBufferFrames; log the values this function actually received. */
    LOGI("OpenSL_Stream", "Created OPENSL_STREAM(%d, %d, %d)",
         sampleRate, outChans, numFrames);
    LOGI("OpenSL_Stream", "numBuffers: %d", OUTPUT_BUFFERS);
    return p;
}
当我完成整理后,我会链接我的GitHub opensl_stream代码示例,以便像我这样的其他新手可以轻松找到可用的示例。干杯!:) 是的,所以我的回调设置完全错误……事实上,我甚至没有设置回调。为了纠正这一点,我参考了网上的一些技巧,并创建了一个处理回调:
/*
 * Processing-callback type. BUG FIX: widened to match how playerCallback
 * actually invokes it — the original 4-parameter typedef did not match the
 * 7-argument call site (context, sample rate, frames, input side, output side).
 */
typedef void (*opensl_callback)(void *context, int sample_rate,
                                int buffer_frames,
                                int input_channels, const short *input_buffer,
                                int output_channels, short *output_buffer);

/* Callback installed on the stream (assigned in runProcess). */
static opensl_callback myAudioCallback;

/*
 * Application processing callback: synthesizes buffer_frames frames of
 * output_channels interleaved samples into output_buffer.
 */
static void audioCallback(void *context, int sample_rate, int buffer_frames,
                          int input_channels, const short *input_buffer,
                          int output_channels, short *output_buffer) {
    /* Get my object's data (per-stream synth state). */
    AudioData *data = (AudioData *) context;
    (void) data; (void) sample_rate; (void) input_channels; (void) input_buffer;
    /* Process here! BUG FIX: the original assigned one sample through the
     * undeclared variables i/final_sample; fill every interleaved sample. */
    for (int i = 0; i < buffer_frames * output_channels; i++) {
        short final_sample = 0; /* replace 0 with the synthesized sample */
        output_buffer[i] = final_sample;
    }
}
/*
 * OpenSL ES buffer-queue callback: fires each time the player has consumed a
 * buffer and needs the next one. All synthesis is driven from here — this is
 * the callback model that replaces the blocking while (true) loop.
 *
 * BUG FIX: the original had a stray closing brace after the callback
 * invocation, cutting the function in half and leaving the Enqueue call and
 * the ring-index advance outside any function.
 */
static void playerCallback(SLAndroidSimpleBufferQueueItf bq, void *context) {
    OPENSL_STREAM *p = (OPENSL_STREAM *) context;
    /* Select the next slot in the ring of output buffers.
     * NOTE(review): the modulo uses numFrames, but opensl_openDevice
     * allocates outputBufferFrames (= OUTPUT_BUFFERS * numFrames) frames —
     * confirm which value correctly indexes the ring. */
    short *currentOutputBuffer = p->outputBuffer +
        (p->outputIndex % p->numFrames) * p->outputChannels;
    /* Start from silence so an underrun produces silence, not stale data. */
    memset(currentOutputBuffer, 0,
           p->callbackBufferFrames * p->outputChannels * sizeof(short));
    /* Let the application fill the buffer.
     * NOTE(review): opensl_openDevice stores the user pointer in p->data,
     * but this call passes p->context — confirm they are the same field or
     * unify the name. */
    p->callback(p->context, p->sampleRate, p->callbackBufferFrames,
                p->inputChannels, p->dummyBuffer,
                p->outputChannels, currentOutputBuffer);
    /* Hand the filled buffer back to OpenSL and advance the ring index. */
    (*bq)->Enqueue(bq, currentOutputBuffer,
                   p->callbackBufferFrames * p->outputChannels * sizeof(short));
    p->outputIndex = nextIndex(p->outputIndex, p->callbackBufferFrames);
}
我如何声明/初始化OpenSL流:
jboolean Java_com_rfoo_AudioProcessor_runProcess(JNIEnv *, jobject,
int srate, int numFrames) {
myAudioCallback = audioCallback;
OPENSL_Stream *p = opensl_openDevice(srate, 2, numFrames, myAudioCallback, audioData);
// Check if successful initialization
if (!p) return JNI_FALSE;
// Start our process:
opensl_startProcess(p);
return JNI_TRUE;
}
基本上,opensl_openDevice() 和 opensl_startProcess() 所做的是:
/*
 * Allocates and configures an OPENSL_STREAM for output-only playback.
 *
 * sampleRate  sample rate in Hz (converted to the OpenSL millihertz code)
 * outChans    number of interleaved output channels (must be > 0)
 * numFrames   frames per callback buffer
 * cb          processing callback (required)
 * data        opaque user pointer handed back to cb
 *
 * Returns NULL on any failure; partially constructed streams are released
 * via opensl_close(). On success the caller owns the returned stream.
 */
OPENSL_STREAM *opensl_openDevice(int sampleRate, int outChans, int numFrames,
                                 opensl_callback cb, void *data) {
    if (!cb) {
        return NULL;
    }
    if (outChans == 0) {
        return NULL;
    }
    /* BUG FIX: the result was stored in an unsigned SLuint32, so the
     * "srmillihz < 0" failure check could never fire; keep it signed until
     * after validation. */
    SLint32 srmillihz = convertSampleRate(sampleRate);
    if (srmillihz < 0) {
        return NULL;
    }
    OPENSL_STREAM *p = (OPENSL_STREAM *) calloc(1, sizeof(OPENSL_STREAM));
    if (!p) {
        return NULL;
    }
    p->callback = cb;
    p->data = data;
    p->isRunning = 0;
    p->outputChannels = outChans;
    p->sampleRate = sampleRate;
    /* Underrun watchdog threshold: 3/4 of one buffer period, in ms. */
    p->thresholdMillis = 750.0 * numFrames / sampleRate;
    p->outputBuffer = NULL;
    p->dummyBuffer = NULL;
    p->numFrames = numFrames;
    p->outputBufferFrames = OUTPUT_BUFFERS * numFrames;
    if (openSLCreateEngine(p) != SL_RESULT_SUCCESS) {
        opensl_close(p);
        return NULL;
    }
    /* outChans is always non-zero here (checked above); branch kept for
     * symmetry with the input-channel setup of the full library. */
    if (outChans) {
        int outBufSize = p->outputBufferFrames * outChans;
        if (!(openSLPlayOpen(p, (SLuint32) srmillihz) == SL_RESULT_SUCCESS &&
              (p->outputBuffer = (short *) calloc(outBufSize, sizeof(short))))) {
            opensl_close(p);
            return NULL;
        }
    }
    /* BUG FIX: the log referenced the undeclared identifiers inChans and
     * callbackBufferFrames; log the values this function actually received. */
    LOGI("OpenSL_Stream", "Created OPENSL_STREAM(%d, %d, %d)",
         sampleRate, outChans, numFrames);
    LOGI("OpenSL_Stream", "numBuffers: %d", OUTPUT_BUFFERS);
    return p;
}
当我完成整理后,我会链接我的GitHub opensl_stream代码示例,以便像我这样的其他新手可以轻松找到可用的示例。干杯!:) 您是否尝试过使用OpenSL ES进行音频播放?它的延迟可以做到尽可能低,并且也使用回调机制对下一个缓冲区进行排队。@WLGfx 这正是我在做的。仔细检查后,发现是我把OpenSL ES驱动设置错了,我得重做一遍。