C 使用libswresample重新采样音频从48000到44100

C 使用libswresample重新采样音频从48000到44100,c,audio,ffmpeg,resampling,libav,C,Audio,Ffmpeg,Resampling,Libav,我正在尝试使用libswresampleapi将解码音频帧从48KHz重新采样到44.1KHz。我的代码如下: // 'frame' is the original decoded audio frame AVFrame *output_frame = av_frame_alloc(); // Without this, there is no sound at all at the output (PTS stuff I guess) av_frame_copy_props(output_f

我正在尝试使用 libswresample API 将解码后的音频帧从 48KHz 重新采样到 44.1KHz。我的代码如下：

// Resample one decoded audio frame from 48 kHz to 44.1 kHz with libswresample.
// NOTE(review): this fragment allocates, uses and destroys the SwrContext for
// every single frame — the resampler's inter-frame delay buffer is discarded
// each time, which is the likely source of the audible noise described below.

// 'frame' is the original decoded audio frame
AVFrame *output_frame = av_frame_alloc();

// Without this, there is no sound at all at the output (PTS stuff I guess)
av_frame_copy_props(output_frame, frame);

// Describe the desired output frame using the output codec context's
// layout / rate / format.
output_frame->channel_layout = audioStream->codec->channel_layout;
output_frame->sample_rate = audioStream->codec->sample_rate;
output_frame->format = audioStream->codec->sample_fmt;

SwrContext *swr;
// Configure resampling context
// NOTE(review): the input side is hard-coded (stereo / fltp / 48000) instead
// of being read from 'frame' — confirm it matches the actual input stream.
swr = swr_alloc_set_opts(NULL,  // we're allocating a new context
                         AV_CH_LAYOUT_STEREO,  // out_ch_layout
                         AV_SAMPLE_FMT_FLTP,     // out_sample_fmt
                         44100,                // out_sample_rate
                         AV_CH_LAYOUT_STEREO,  // in_ch_layout
                         AV_SAMPLE_FMT_FLTP,   // in_sample_fmt
                         48000,                // in_sample_rate
                         0,                    // log_offset
                         NULL);                // log_ctx
// Initialize resampling context
swr_init(swr);

// Perform conversion
swr_convert_frame(swr, output_frame, frame);

// Close resampling context
// NOTE(review): closing here every frame resets the resampler state; the
// context should be created once and reused for the whole stream.
swr_close(swr);
swr_free(&swr);
// Free the original frame and replace it with the new one
av_frame_unref(frame);
return output_frame;
有了这段代码，我可以在输出端听到音频，但也有噪音。从我所读到的内容来看，即使没有 av_frame_copy_props()，这段代码也应该足够了，但由于某些原因它不起作用。有什么想法吗？

编辑:输入流使用AAC对音频进行编码,采样数为1024。但是,转换后的样本数为925

编辑:我尝试了反向操作。由于我的应用程序从任何来源接收流,一些音频流为48KHz,另一些为44.1KHz。所以我尝试从44.1重新采样到48,以避免重新采样丢失。但是现在,每帧有1024个以上的样本,编码失败

编辑:我尝试使用libavfilter代替以下过滤器链:

/**
 * Build the audio filter graph: abuffer -> aformat -> asetnsamples -> abuffersink.
 *
 * aformat converts to fltp / 44100 Hz / stereo; asetnsamples re-chunks the
 * output into fixed 1024-sample frames (p=0: the last frame is not padded).
 *
 * Uses the file-scope globals filter_graph, strbuf, abuffer_ctx, aformat_ctx,
 * asetnsamples_ctx and abuffersink_ctx (declared elsewhere in this file).
 *
 * @param audio_st  input audio stream (source of time base and input format)
 * @return 0 on success, -1 or a negative AVERROR code on failure; on failure
 *         the partially built graph is freed and filter_graph is reset to NULL.
 */
int init_filter_graph(AVStream *audio_st) {
    int err = -1;

    // create new graph
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        av_log(NULL, AV_LOG_ERROR, "unable to create filter graph: out of memory\n");
        return -1;
    }

    AVFilter *abuffer = avfilter_get_by_name("abuffer");
    AVFilter *aformat = avfilter_get_by_name("aformat");
    AVFilter *asetnsamples = avfilter_get_by_name("asetnsamples");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffer || !aformat || !asetnsamples || !abuffersink) {
        av_log(NULL, AV_LOG_ERROR, "required audio filter is not available\n");
        goto fail;
    }

    // create abuffer (source) filter describing the decoded input
    AVCodecContext *avctx = audio_st->codec;
    AVRational time_base = audio_st->time_base;
    snprintf(strbuf, sizeof(strbuf),
             "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
             time_base.num, time_base.den, avctx->sample_rate,
             av_get_sample_fmt_name(avctx->sample_fmt),
             avctx->channel_layout);
    fprintf(stderr, "abuffer: %s\n", strbuf);
    err = avfilter_graph_create_filter(&abuffer_ctx, abuffer,
                                       NULL, strbuf, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error initializing abuffer filter\n");
        goto fail;
    }

    // create aformat filter: convert to fltp / 44100 Hz / stereo
    snprintf(strbuf, sizeof(strbuf),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%" PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_FLTP), 44100,
             AV_CH_LAYOUT_STEREO);
    fprintf(stderr, "aformat: %s\n", strbuf);
    err = avfilter_graph_create_filter(&aformat_ctx, aformat,
                                       NULL, strbuf, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "unable to create aformat filter\n");
        goto fail;
    }

    // create asetnsamples filter: fixed 1024-sample frames, no padding
    snprintf(strbuf, sizeof(strbuf), "n=1024:p=0");
    fprintf(stderr, "asetnsamples: %s\n", strbuf);
    err = avfilter_graph_create_filter(&asetnsamples_ctx, asetnsamples,
                                       NULL, strbuf, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "unable to create asetnsamples filter\n");
        goto fail;
    }

    // create abuffersink (output) filter
    err = avfilter_graph_create_filter(&abuffersink_ctx, abuffersink,
                                       NULL, NULL, NULL, filter_graph);
    if (err < 0) {
        // BUGFIX: this message previously said "aformat" (copy-paste error)
        av_log(NULL, AV_LOG_ERROR, "unable to create abuffersink filter\n");
        goto fail;
    }

    // connect inputs and outputs
    err = avfilter_link(abuffer_ctx, 0, aformat_ctx, 0);
    if (err >= 0) err = avfilter_link(aformat_ctx, 0, asetnsamples_ctx, 0);
    if (err >= 0) err = avfilter_link(asetnsamples_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error connecting filters\n");
        goto fail;
    }

    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error configuring the filter graph\n");
        goto fail;
    }
    return 0;

fail:
    // BUGFIX: previously the half-built graph was leaked on every error path.
    avfilter_graph_free(&filter_graph);
    return err;
}
int init_filter_graph(AVStream *audio_st) {
    // create new graph
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        av_log(NULL, AV_LOG_ERROR, "unable to create filter graph: out of memory\n");
        return -1;
    }

    AVFilter *abuffer = avfilter_get_by_name("abuffer");
    AVFilter *aformat = avfilter_get_by_name("aformat");
    AVFilter *asetnsamples = avfilter_get_by_name("asetnsamples");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");

    int err;
    // create abuffer filter
    AVCodecContext *avctx = audio_st->codec;
    AVRational time_base = audio_st->time_base;
    snprintf(strbuf, sizeof(strbuf),
             "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
             time_base.num, time_base.den, avctx->sample_rate,
             av_get_sample_fmt_name(avctx->sample_fmt),
             avctx->channel_layout);
    fprintf(stderr, "abuffer: %s\n", strbuf);
    err = avfilter_graph_create_filter(&abuffer_ctx, abuffer,
                                       NULL, strbuf, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error initializing abuffer filter\n");
        return err;
    }
    // create aformat filter
    snprintf(strbuf, sizeof(strbuf),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%" PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_FLTP), 44100,
             AV_CH_LAYOUT_STEREO);
    fprintf(stderr, "aformat: %s\n", strbuf);
    err = avfilter_graph_create_filter(&aformat_ctx, aformat,
                                       NULL, strbuf, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "unable to create aformat filter\n");
        return err;
    }
    // create asetnsamples filter
    snprintf(strbuf, sizeof(strbuf), "n=1024:p=0");
    fprintf(stderr, "asetnsamples: %s\n", strbuf);
    err = avfilter_graph_create_filter(&asetnsamples_ctx, asetnsamples,
                                       NULL, strbuf, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "unable to create asetnsamples filter\n");
        return err;
    }
    // create abuffersink filter
    err = avfilter_graph_create_filter(&abuffersink_ctx, abuffersink,
                                       NULL, NULL, NULL, filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "unable to create abuffersink filter\n");
        return err;
    }

    // connect inputs and outputs
    if (err >= 0) err = avfilter_link(abuffer_ctx, 0, aformat_ctx, 0);
    if (err >= 0) err = avfilter_link(aformat_ctx, 0, asetnsamples_ctx, 0);
    if (err >= 0) err = avfilter_link(asetnsamples_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error connecting filters\n");
        return err;
    }
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error configuring the filter graph\n");
        return err;
    }
    return 0;
}

现在,生成的帧有1024个采样,但音频仍然不稳定。

不要为每个帧重新创建
SwrContext
。它需要将一些数据从一帧传送到下一帧,以平滑重采样帧之间的“边缘”


当您开始播放音频时,创建一个
SwrContext
,并为每个帧调用
swr_convert_frame

最后，我用来自另一个回答的解决方案解决了这个问题。

这是为我的设置创建过滤器的代码(重新采样到44.1KHz)

AVFilterGraph *filter_graph = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
QString filter_description = "aresample=44100,aformat=sample_fmts=fltp:channel_layouts=stereo,asetnsamples=n=1024:p=0";
/**
 * Initialize conversion filter */
int initialize_audio_filter(AVStream *inputStream) {
    char args[512];
    int ret;
    AVFilter *buffersrc = avfilter_get_by_name("abuffer");
    AVFilter *buffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    filter_graph = avfilter_graph_alloc();
    const enum AVSampleFormat out_sample_fmts[] = {AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE};
    const int64_t out_channel_layouts[] = {AV_CH_LAYOUT_STEREO, -1};
    const int out_sample_rates[] = {44100, -1};
    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
             inputStream->codec->time_base.num, inputStream->codec->time_base.den,
             inputStream->codec->sample_rate,
             av_get_sample_fmt_name(inputStream->codec->sample_fmt),
             inputStream->codec->channel_layout);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
    if (ret < 0) {
        svsCritical("", QString("Could not create filter graph, error: %1").arg(svsAvErrorToFormattedString(ret)))
        return -1;
    }
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
    if (ret < 0) {
        svsCritical("", QString("Cannot create buffer sink, error: %1").arg(svsAvErrorToFormattedString(ret)))
        return ret;
    }
    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
                              AV_OPT_SEARCH_CHILDREN);
AVFilterGraph *filter_graph = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
QString filter_description = "aresample=44100,aformat=sample_fmts=fltp:channel_layouts=stereo,asetnsamples=n=1024:p=0";

/**
 * Initialize the conversion filter graph described by filter_description
 * (resample to 44.1 kHz, fltp/stereo, re-chunked into 1024-sample frames).
 *
 * Fills the file-scope globals filter_graph, buffersrc_ctx and buffersink_ctx.
 *
 * @param inputStream  stream providing the input time base and audio format
 * @return 0 on success, -1 or a negative AVERROR code on failure
 */
int initialize_audio_filter(AVStream *inputStream) {
    char args[512];
    int ret;
    AVFilter *buffersrc = avfilter_get_by_name("abuffer");
    AVFilter *buffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    filter_graph = avfilter_graph_alloc();

    const enum AVSampleFormat out_sample_fmts[] = {AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE};
    const int64_t out_channel_layouts[] = {AV_CH_LAYOUT_STEREO, -1};
    const int out_sample_rates[] = {44100, -1};

    // BUGFIX: allocations were previously unchecked.
    if (!outputs || !inputs || !filter_graph) {
        svsCritical("", QString("Could not allocate filter graph: out of memory"))
        ret = -1;
        goto end;
    }

    /* Describe the raw frames fed into the graph */
    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
             inputStream->codec->time_base.num, inputStream->codec->time_base.den,
             inputStream->codec->sample_rate,
             av_get_sample_fmt_name(inputStream->codec->sample_fmt),
             inputStream->codec->channel_layout);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);

    if (ret < 0) {
        svsCritical("", QString("Could not create filter graph, error: %1").arg(svsAvErrorToFormattedString(ret)))
        ret = -1;
        goto end;
    }

    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);

    if (ret < 0) {
        svsCritical("", QString("Cannot create buffer sink, error: %1").arg(svsAvErrorToFormattedString(ret)))
        goto end;
    }

    /* Constrain the sink to the desired output format */
    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
                              AV_OPT_SEARCH_CHILDREN);

    if (ret < 0) {
        svsCritical("", QString("Cannot set output sample format, error: %1").arg(svsAvErrorToFormattedString(ret)))
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
                              AV_OPT_SEARCH_CHILDREN);

    if (ret < 0) {
        svsCritical("", QString("Cannot set output channel layout, error: %1").arg(svsAvErrorToFormattedString(ret)))
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
                              AV_OPT_SEARCH_CHILDREN);

    if (ret < 0) {
        svsCritical("", QString("Cannot set output sample rate, error: %1").arg(svsAvErrorToFormattedString(ret)))
        goto end;
    }

    /* Endpoints for the filter graph: "in" is our source, "out" our sink */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    // BUGFIX: parse/config failures were previously only logged and the
    // function still returned 0, leaving a broken graph in use.
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_description.toStdString().c_str(), &inputs, &outputs, NULL)) < 0) {
        svsCritical("", QString("Could not add the filter to graph, error: %1").arg(svsAvErrorToFormattedString(ret)))
        goto end;
    }

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
        svsCritical("", QString("Could not configure the graph, error: %1").arg(svsAvErrorToFormattedString(ret)))
        goto end;
    }

    /* Print summary of the sink buffer
     * Note: args buffer is reused to store channel layout string */
    {
        AVFilterLink *outlink = buffersink_ctx->inputs[0];
        av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
        svsInfo("", QString::asprintf("Output: srate:%dHz fmt:%s chlayout:%s\n",
                                      (int) outlink->sample_rate,
                                      (char *) av_x_if_null(av_get_sample_fmt_name((AVSampleFormat) outlink->format), "?"),
                                      args))
    }
    ret = 0;

end:
    // BUGFIX: the AVFilterInOut lists were previously leaked on every call.
    // avfilter_graph_parse_ptr consumes/rewrites them; free whatever remains.
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}
/**
 * Push one decoded frame into the filter graph and pull one resampled
 * 1024-sample frame out of it.
 *
 * Ownership: the input 'frame' is always unreferenced before returning.
 * Returns a newly allocated frame the caller must free, or nullptr when the
 * graph rejected the frame or has not yet buffered enough samples to emit
 * one (common for 48 kHz -> 44.1 kHz input).
 */
AVFrame* resampleAudio(const QString& key, AVFrame *frame) {

    /* Push the decoded frame into the filtergraph */
    qint32 ret;
    ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
    if(ret < 0) {
        svsWarning(key, QString("Error adding frame to buffer: %1").arg(svsAvErrorToFormattedString(ret)))
        // Delete input frame and return null
        av_frame_unref(frame);
        return nullptr;
    }

    AVFrame *resampled_frame = av_frame_alloc();
    // BUGFIX: the allocation was previously unchecked.
    if(!resampled_frame) {
        av_frame_unref(frame);
        return nullptr;
    }

    /* Pull filtered frames from the filtergraph */
    ret = av_buffersink_get_frame(buffersink_ctx, resampled_frame);

    if(ret < 0) {
        // This is very common. For 48KHz -> 44.1KHz for some input frames the
        // filter has not data enough to generate another one.
        av_frame_unref(frame);
        // BUGFIX: av_frame_unref alone leaked the AVFrame struct itself;
        // av_frame_free releases both the buffers and the frame.
        av_frame_free(&resampled_frame);
        return nullptr;
    }

    /* Set the timestamp on the resampled frame.
     * BUGFIX: previously done before checking ret, i.e. on a frame that
     * av_buffersink_get_frame had not filled. */
    resampled_frame->best_effort_timestamp = resampled_frame->pts;

    av_frame_unref(frame);
    return resampled_frame;
}