在命令终端编译ffmpeg代码时遇到问题 我在终端中编译我的C++代码遇到了一些麻烦。我已经安装了ffmpeg,如下所示 ffmpeg version n4.1 Copyright (c) 2000-2018 the FFmpeg developers built with gcc 7 (Ubuntu 7.4.0-1ubuntu1~18.04.1) configuration: --enable-gpl --enable-version3 --disable-static --enable-shared --enable-small --enable-avisynth --enable-chromaprint --enable-frei0r --enable-gmp --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librsvg --enable-librubberband --enable-librtmp --enable-libshine --enable-libsmbclient --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtesseract --enable-libtheora --enable-libtwolame --enable-libv4l2 --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxvid --enable-libxml2 --enable-libzmq --enable-libzvbi --enable-lv2 --enable-libmysofa --enable-openal --enable-opencl --enable-opengl --enable-libdrm libavutil 56. 22.100 / 56. 22.100 libavcodec 58. 35.100 / 58. 35.100 libavformat 58. 20.100 / 58. 20.100 libavdevice 58. 5.100 / 58. 5.100 libavfilter 7. 40.101 / 7. 40.101 libswscale 5. 3.100 / 5. 3.100 libswresample 3. 3.100 / 3. 3.100 libpostproc 55. 3.100 / 55. 3.100 Hyper fast Audio and Video encoder usage: ffmpeg [options] [[infile options] -i infile]... {[outfile options] outfile}... < >但是,当我编译我的C++代码时,目前正在试图从FFMPEG中的私有数据获取NTP时间戳,但是我需要包括它们的头文件?我查看了ffmpeg的libavformat文件夹,它确实有rtpdec.h,但是当我在命令行中编译它时,我得到了这个错误。(尝试为RTSPState和RTSPStream以及RTPDemuxContext包含此标题) cf.cpp:11:10:致命错误:libavformat/rtsp.h:没有这样的文件或目录 #包括 ^~~~~~~~~~~~~~~~~~~~ 编译终止。

在命令终端编译ffmpeg代码时遇到问题 我在终端中编译我的C++代码遇到了一些麻烦。我已经安装了ffmpeg,如下所示 ffmpeg version n4.1 Copyright (c) 2000-2018 the FFmpeg developers built with gcc 7 (Ubuntu 7.4.0-1ubuntu1~18.04.1) configuration: --enable-gpl --enable-version3 --disable-static --enable-shared --enable-small --enable-avisynth --enable-chromaprint --enable-frei0r --enable-gmp --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librsvg --enable-librubberband --enable-librtmp --enable-libshine --enable-libsmbclient --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtesseract --enable-libtheora --enable-libtwolame --enable-libv4l2 --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxvid --enable-libxml2 --enable-libzmq --enable-libzvbi --enable-lv2 --enable-libmysofa --enable-openal --enable-opencl --enable-opengl --enable-libdrm libavutil 56. 22.100 / 56. 22.100 libavcodec 58. 35.100 / 58. 35.100 libavformat 58. 20.100 / 58. 20.100 libavdevice 58. 5.100 / 58. 5.100 libavfilter 7. 40.101 / 7. 40.101 libswscale 5. 3.100 / 5. 3.100 libswresample 3. 3.100 / 3. 3.100 libpostproc 55. 3.100 / 55. 3.100 Hyper fast Audio and Video encoder usage: ffmpeg [options] [[infile options] -i infile]... {[outfile options] outfile}... 
< >但是,当我编译我的C++代码时,目前正在试图从FFMPEG中的私有数据获取NTP时间戳,但是我需要包括它们的头文件?我查看了ffmpeg的libavformat文件夹,它确实有rtpdec.h,但是当我在命令行中编译它时,我得到了这个错误。(尝试为RTSPState和RTSPStream以及RTPDemuxContext包含此标题) cf.cpp:11:10:致命错误:libavformat/rtsp.h:没有这样的文件或目录 #包括 ^~~~~~~~~~~~~~~~~~~~ 编译终止。,c++,ffmpeg,C++,Ffmpeg,这是我的代码: #include <stdio.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <sstream> extern "C" { #include <libavcodec/avcodec.h> #include <libavformat/avformat.h> #include <libavforma

这是我的代码:

#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavformat/rtpdec.h>
#include <libswscale/swscale.h>
}

int main(int argc, char** argv) {

    // NOTE(review): RTSPState / RTSPStream / RTPDemuxContext come from FFmpeg's
    // *private* headers (libavformat/rtsp.h, libavformat/rtpdec.h). They are not
    // copied by `make install`, which is why the compiler reports
    // "libavformat/rtsp.h: No such file or directory". Compile with -I pointing
    // at the FFmpeg *source* tree the installed libraries were built from.

    // Open the initial context variables that are needed.
    SwsContext* img_convert_ctx = NULL;
    AVFormatContext* format_ctx = avformat_alloc_context();
    AVCodecContext* codec_ctx = NULL;
    int video_stream_index = -1;  // -1 = no video stream found yet

    // FIX: these three were uninitialised *pointers* that the loop below
    // dereferenced (undefined behaviour / crash). Plain values are all we need.
    uint32_t last_rtcp_ts = 0;
    double base_time = 0.0;
    double frame_time = 0.0;

    // Register everything (deprecated no-op since FFmpeg 4.0, still harmless here).
    av_register_all();
    avformat_network_init();

    // Open the RTSP input.
    if (avformat_open_input(&format_ctx, "rtsp://admin:password@192.168.1.67:554",
            NULL, NULL) != 0) {
        return EXIT_FAILURE;
    }

    if (avformat_find_stream_info(format_ctx, NULL) < 0) {
        return EXIT_FAILURE;
    }

    // Search for the video stream (nb_streams is unsigned, so the index is too).
    for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
        if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            video_stream_index = i;
    }
    // FIX: the original used video_stream_index uninitialised when no video
    // stream existed.
    if (video_stream_index < 0) {
        std::cerr << "No video stream found" << std::endl;
        return EXIT_FAILURE;
    }

    AVPacket packet;
    av_init_packet(&packet);

    // Output context (no muxer/IO is ever opened on it; streams only).
    AVFormatContext* output_ctx = avformat_alloc_context();

    AVStream* stream = NULL;
    int cnt = 0;

    // Start reading packets from the stream.
    av_read_play(format_ctx);    // play RTSP

    // Get the decoder (this camera is assumed to deliver H.264).
    AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        exit(1);
    }

    // Allocate the decoding context from the codec and copy the input stream's
    // parameters into it (avcodec_copy_context is deprecated in 4.x but matches
    // the installed n4.1 API).
    codec_ctx = avcodec_alloc_context3(codec);
    avcodec_get_context_defaults3(codec_ctx, codec);
    avcodec_copy_context(codec_ctx, format_ctx->streams[video_stream_index]->codec);
    std::ofstream output_file;

    if (avcodec_open2(codec_ctx, codec, NULL) < 0)
        exit(1);

    img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
            codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
            SWS_BICUBIC, NULL, NULL, NULL);

    int size = avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
            codec_ctx->height);
    uint8_t* picture_buffer = (uint8_t*) av_malloc(size);
    AVFrame* picture = av_frame_alloc();
    AVFrame* picture_rgb = av_frame_alloc();
    int size2 = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
            codec_ctx->height);
    uint8_t* picture_buffer_2 = (uint8_t*) av_malloc(size2);
    avpicture_fill((AVPicture *) picture, picture_buffer, AV_PIX_FMT_YUV420P,
            codec_ctx->width, codec_ctx->height);
    avpicture_fill((AVPicture *) picture_rgb, picture_buffer_2, AV_PIX_FMT_RGB24,
            codec_ctx->width, codec_ctx->height);

    while (av_read_frame(format_ctx, &packet) >= 0 && cnt < 1000) { //read ~ 1000 frames

        // FIX: C++ forbids implicit void* -> T* conversion; the original
        // C-style assignments from priv_data/transport_priv do not compile
        // as C++. static_cast makes the (unchecked) conversion explicit.
        RTSPState* rt = static_cast<RTSPState*>(format_ctx->priv_data);
        RTSPStream* rtsp_stream = rt->rtsp_streams[0];
        RTPDemuxContext* rtp_demux_context =
                static_cast<RTPDemuxContext*>(rtsp_stream->transport_priv);
        uint32_t new_rtcp_ts = rtp_demux_context->last_rtcp_timestamp;

        // A fresh RTCP sender report carries an NTP <-> RTP timestamp pair;
        // use it to rebase the wall-clock time of subsequent RTP timestamps.
        if (new_rtcp_ts != last_rtcp_ts) {
            last_rtcp_ts = new_rtcp_ts;
            uint64_t last_ntp_time = rtp_demux_context->last_rtcp_ntp_time;
            // 2208988800 = offset between the NTP epoch (1900) and Unix epoch (1970).
            uint32_t seconds = ((last_ntp_time >> 32) & 0xffffffff) - 2208988800u;
            uint32_t fraction = (last_ntp_time & 0xffffffff);
            double useconds = ((double) fraction / 0xffffffff);
            base_time = seconds + useconds;
            // 90 kHz is the standard RTP clock rate for video payloads.
            uint32_t d_ts = rtp_demux_context->timestamp - last_rtcp_ts;
            frame_time = base_time + d_ts / 90000.0;
            std::cout << "Time is: " << frame_time << std::endl;
        }

        std::cout << "1 Frame: " << cnt << std::endl;
        if (packet.stream_index == video_stream_index) {    //packet is video
            std::cout << "2 Is Video" << std::endl;
            if (stream == NULL) {    //create stream in file
                std::cout << "3 create stream" << std::endl;
                stream = avformat_new_stream(output_ctx,
                        format_ctx->streams[video_stream_index]->codec->codec);
                avcodec_copy_context(stream->codec,
                        format_ctx->streams[video_stream_index]->codec);
                stream->sample_aspect_ratio =
                        format_ctx->streams[video_stream_index]->codec->sample_aspect_ratio;
            }
            int check = 0;
            packet.stream_index = stream->id;
            std::cout << "4 decoding" << std::endl;
            int result = avcodec_decode_video2(codec_ctx, picture, &check, &packet);
            std::cout << "Bytes decoded " << result << " check " << check
                    << std::endl;
            if (cnt > 100) {    // skip warm-up frames before dumping PPMs
                sws_scale(img_convert_ctx, picture->data, picture->linesize, 0,
                        codec_ctx->height, picture_rgb->data, picture_rgb->linesize);
                std::stringstream file_name;
                file_name << "test" << cnt << ".ppm";
                output_file.open(file_name.str().c_str());
                output_file << "P3 " << codec_ctx->width << " " << codec_ctx->height
                        << " 255\n";
                for (int y = 0; y < codec_ctx->height; y++) {
                    for (int x = 0; x < codec_ctx->width * 3; x++)
                        output_file
                                << (int) (picture_rgb->data[0]
                                        + y * picture_rgb->linesize[0])[x] << " ";
                }
                output_file.close();
            }
            cnt++;
        }
        // FIX: av_free_packet() is deprecated; av_packet_unref() is the
        // supported way to release a packet's payload on FFmpeg 3.x/4.x.
        av_packet_unref(&packet);
        av_init_packet(&packet);
    }

    // FIX: AVFrames must be released with av_frame_free(), not av_free(),
    // otherwise their internal state leaks.
    av_frame_free(&picture);
    av_frame_free(&picture_rgb);
    av_free(picture_buffer);
    av_free(picture_buffer_2);

    av_read_pause(format_ctx);
    // output_ctx->pb was never opened with avio_open(), so the original
    // avio_close(output_ctx->pb) closed NULL; it is simply dropped.
    avformat_free_context(output_ctx);
    // FIX: release the decoder, the scaler and the input context as well.
    avcodec_free_context(&codec_ctx);
    sws_freeContext(img_convert_ctx);
    avformat_close_input(&format_ctx);
    avformat_network_deinit();

    return EXIT_SUCCESS;
}
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavformat/rtpdec.h>
#include <libswscale/swscale.h>
}

int main(int argc, char** argv) {

    // 打开所需的初始上下文变量
    SwsContext *img_convert_ctx;
    AVFormatContext* format_ctx = avformat_alloc_context();
    AVCodecContext* codec_ctx = NULL;
    int video_stream_index;
    uint32_t* last_rtcp_ts;
    double* base_time;
    double* time;

    // 登记一切
    av_register_all();
    avformat_network_init();

    // 开放式RTSP
    if (avformat_open_input(&format_ctx, "rtsp://admin:password@192.168.1.67:554",
            NULL, NULL) != 0) {
        return EXIT_FAILURE;
    }

    if (avformat_find_stream_info(format_ctx, NULL) < 0) {
        return EXIT_FAILURE;
    }

    // 搜索视频流
    for (int i = 0; i < format_ctx->nb_streams; i++) {
        if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            video_stream_index = i;
    }

    AVPacket packet;
    av_init_packet(&packet);

    // 打开输出文件
    AVFormatContext* output_ctx = avformat_alloc_context();

    AVStream* stream = NULL;
    int cnt = 0;

    // 开始从流中读取数据包并将其写入文件
    av_read_play(format_ctx);    // 播放RTSP

    // 获取编解码器
    AVCodec *codec = NULL;
    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        exit(1);
    }

    // 添加此项以按编解码器分配上下文
    codec_ctx = avcodec_alloc_context3(codec);

    avcodec_get_context_defaults3(codec_ctx, codec);
    avcodec_copy_context(codec_ctx, format_ctx->streams[video_stream_index]->codec);
    std::ofstream output_file;

    if (avcodec_open2(codec_ctx, codec, NULL) < 0)
        exit(1);

    img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
            codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
            SWS_BICUBIC, NULL, NULL, NULL);

    int size = avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
            codec_ctx->height);
    uint8_t* picture_buffer = (uint8_t*) (av_malloc(size));
    AVFrame* picture = av_frame_alloc();
    AVFrame* picture_rgb = av_frame_alloc();
    int size2 = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
            codec_ctx->height);
    uint8_t* picture_buffer_2 = (uint8_t*) (av_malloc(size2));
    avpicture_fill((AVPicture *) picture, picture_buffer, AV_PIX_FMT_YUV420P,
            codec_ctx->width, codec_ctx->height);
    avpicture_fill((AVPicture *) picture_rgb, picture_buffer_2, AV_PIX_FMT_RGB24,
            codec_ctx->width, codec_ctx->height);

    while (av_read_frame(format_ctx, &packet) >= 0 && cnt < 1000) { // 读取约1000帧

        RTSPState* rt = format_ctx->priv_data;
        RTSPStream *rtsp_stream = rt->rtsp_streams[0];
        RTPDemuxContext* rtp_demux_context = rtsp_stream->transport_priv;
        uint32_t new_rtcp_ts = rtp_demux_context->last_rtcp_timestamp;
        uint64_t last_ntp_time = 0;

        if (new_rtcp_ts != *last_rtcp_ts) {
            *last_rtcp_ts = new_rtcp_ts;
            last_ntp_time = rtp_demux_context->last_rtcp_ntp_time;
            uint32_t seconds = ((last_ntp_time >> 32) & 0xffffffff) - 2208988800;
            uint32_t fraction  = (last_ntp_time & 0xffffffff);
            double useconds = ((double) fraction / 0xffffffff);
            *base_time = seconds + useconds;
            uint32_t d_ts = rtp_demux_context->timestamp - *last_rtcp_ts;
            *time = *base_time + d_ts / 90000.0;
            std::cout << "Time is: " << *time << std::endl;
        }

        std::cout << "1 Frame: " << cnt << std::endl;
        if (packet.stream_index == video_stream_index) {    // 数据包是视频
            std::cout << "2 Is Video" << std::endl;
            if (stream == NULL) {    // 在文件中创建流
                std::cout << "3 create stream" << std::endl;
                stream = avformat_new_stream(output_ctx,
                        format_ctx->streams[video_stream_index]->codec->codec);
                avcodec_copy_context(stream->codec,
                        format_ctx->streams[video_stream_index]->codec);
                stream->sample_aspect_ratio =
                        format_ctx->streams[video_stream_index]->codec->sample_aspect_ratio;
            }
            int check = 0;
            packet.stream_index = stream->id;

错误消息不一致。它表示未找到
rtsp.h
,而include声明要包含
rtpdec.h
。请确保所有内容都是从头开始构建的。我是从源代码构建的,但我不知道为什么会这样。我应该重新构建吗?这让我有点困惑。有点奇怪但是,
avio.h
avformat.h
被找到,而
rtpdec.h
不是。我指的是你的项目。我不知道你在使用什么IDE和构建系统,但尝试删除缓存的任何数据,确保所有文件都写入磁盘,重新启动IDE,等等。错误消息没有意义,因为它现在是这样的。似乎没有已编译的
cf.cpp
不是当前版本。我只是在ubuntu终端上编译。我会尝试使用IDE或其他东西。有点困惑。谢谢你的提示!不需要IDE,只需制作一个包含
#include <libavformat/rtpdec.h>
的测试文件,看看是否会给出相同的错误消息。我怀疑不会,你不知何故没有编译你想要的文件。但是如果我错了,请告诉我测试是否失败,并显示相同的错误消息。错误消息不一致。它表示未找到
rtsp.h
,而include声明要包含
rtpdec.h
。请确保从头开始构建所有内容。但是,我是从源代码构建的,我不知道为什么会这样。我应该重新重建吗?这让我有点困惑。但有点奇怪,
avio.h
avformat.h
被找到并且
rtpdec.h
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavformat/rtpdec.h>
#include <libswscale/swscale.h>
}

int main(int argc, char** argv) {

    // NOTE(review): RTSPState / RTSPStream / RTPDemuxContext come from FFmpeg's
    // *private* headers (libavformat/rtsp.h, libavformat/rtpdec.h). They are not
    // copied by `make install`, which is why the compiler reports
    // "libavformat/rtsp.h: No such file or directory". Compile with -I pointing
    // at the FFmpeg *source* tree the installed libraries were built from.

    // Open the initial context variables that are needed.
    SwsContext* img_convert_ctx = NULL;
    AVFormatContext* format_ctx = avformat_alloc_context();
    AVCodecContext* codec_ctx = NULL;
    int video_stream_index = -1;  // -1 = no video stream found yet

    // FIX: these three were uninitialised *pointers* that the loop below
    // dereferenced (undefined behaviour / crash). Plain values are all we need.
    uint32_t last_rtcp_ts = 0;
    double base_time = 0.0;
    double frame_time = 0.0;

    // Register everything (deprecated no-op since FFmpeg 4.0, still harmless here).
    av_register_all();
    avformat_network_init();

    // Open the RTSP input.
    if (avformat_open_input(&format_ctx, "rtsp://admin:password@192.168.1.67:554",
            NULL, NULL) != 0) {
        return EXIT_FAILURE;
    }

    if (avformat_find_stream_info(format_ctx, NULL) < 0) {
        return EXIT_FAILURE;
    }

    // Search for the video stream (nb_streams is unsigned, so the index is too).
    for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
        if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            video_stream_index = i;
    }
    // FIX: the original used video_stream_index uninitialised when no video
    // stream existed.
    if (video_stream_index < 0) {
        std::cerr << "No video stream found" << std::endl;
        return EXIT_FAILURE;
    }

    AVPacket packet;
    av_init_packet(&packet);

    // Output context (no muxer/IO is ever opened on it; streams only).
    AVFormatContext* output_ctx = avformat_alloc_context();

    AVStream* stream = NULL;
    int cnt = 0;

    // Start reading packets from the stream.
    av_read_play(format_ctx);    // play RTSP

    // Get the decoder (this camera is assumed to deliver H.264).
    AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        exit(1);
    }

    // Allocate the decoding context from the codec and copy the input stream's
    // parameters into it (avcodec_copy_context is deprecated in 4.x but matches
    // the installed n4.1 API).
    codec_ctx = avcodec_alloc_context3(codec);
    avcodec_get_context_defaults3(codec_ctx, codec);
    avcodec_copy_context(codec_ctx, format_ctx->streams[video_stream_index]->codec);
    std::ofstream output_file;

    if (avcodec_open2(codec_ctx, codec, NULL) < 0)
        exit(1);

    img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
            codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
            SWS_BICUBIC, NULL, NULL, NULL);

    int size = avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
            codec_ctx->height);
    uint8_t* picture_buffer = (uint8_t*) av_malloc(size);
    AVFrame* picture = av_frame_alloc();
    AVFrame* picture_rgb = av_frame_alloc();
    int size2 = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
            codec_ctx->height);
    uint8_t* picture_buffer_2 = (uint8_t*) av_malloc(size2);
    avpicture_fill((AVPicture *) picture, picture_buffer, AV_PIX_FMT_YUV420P,
            codec_ctx->width, codec_ctx->height);
    avpicture_fill((AVPicture *) picture_rgb, picture_buffer_2, AV_PIX_FMT_RGB24,
            codec_ctx->width, codec_ctx->height);

    while (av_read_frame(format_ctx, &packet) >= 0 && cnt < 1000) { //read ~ 1000 frames

        // FIX: C++ forbids implicit void* -> T* conversion; the original
        // C-style assignments from priv_data/transport_priv do not compile
        // as C++. static_cast makes the (unchecked) conversion explicit.
        RTSPState* rt = static_cast<RTSPState*>(format_ctx->priv_data);
        RTSPStream* rtsp_stream = rt->rtsp_streams[0];
        RTPDemuxContext* rtp_demux_context =
                static_cast<RTPDemuxContext*>(rtsp_stream->transport_priv);
        uint32_t new_rtcp_ts = rtp_demux_context->last_rtcp_timestamp;

        // A fresh RTCP sender report carries an NTP <-> RTP timestamp pair;
        // use it to rebase the wall-clock time of subsequent RTP timestamps.
        if (new_rtcp_ts != last_rtcp_ts) {
            last_rtcp_ts = new_rtcp_ts;
            uint64_t last_ntp_time = rtp_demux_context->last_rtcp_ntp_time;
            // 2208988800 = offset between the NTP epoch (1900) and Unix epoch (1970).
            uint32_t seconds = ((last_ntp_time >> 32) & 0xffffffff) - 2208988800u;
            uint32_t fraction = (last_ntp_time & 0xffffffff);
            double useconds = ((double) fraction / 0xffffffff);
            base_time = seconds + useconds;
            // 90 kHz is the standard RTP clock rate for video payloads.
            uint32_t d_ts = rtp_demux_context->timestamp - last_rtcp_ts;
            frame_time = base_time + d_ts / 90000.0;
            std::cout << "Time is: " << frame_time << std::endl;
        }

        std::cout << "1 Frame: " << cnt << std::endl;
        if (packet.stream_index == video_stream_index) {    //packet is video
            std::cout << "2 Is Video" << std::endl;
            if (stream == NULL) {    //create stream in file
                std::cout << "3 create stream" << std::endl;
                stream = avformat_new_stream(output_ctx,
                        format_ctx->streams[video_stream_index]->codec->codec);
                avcodec_copy_context(stream->codec,
                        format_ctx->streams[video_stream_index]->codec);
                stream->sample_aspect_ratio =
                        format_ctx->streams[video_stream_index]->codec->sample_aspect_ratio;
            }
            int check = 0;
            packet.stream_index = stream->id;
            std::cout << "4 decoding" << std::endl;
            int result = avcodec_decode_video2(codec_ctx, picture, &check, &packet);
            std::cout << "Bytes decoded " << result << " check " << check
                    << std::endl;
            if (cnt > 100) {    // skip warm-up frames before dumping PPMs
                sws_scale(img_convert_ctx, picture->data, picture->linesize, 0,
                        codec_ctx->height, picture_rgb->data, picture_rgb->linesize);
                std::stringstream file_name;
                file_name << "test" << cnt << ".ppm";
                output_file.open(file_name.str().c_str());
                output_file << "P3 " << codec_ctx->width << " " << codec_ctx->height
                        << " 255\n";
                for (int y = 0; y < codec_ctx->height; y++) {
                    for (int x = 0; x < codec_ctx->width * 3; x++)
                        output_file
                                << (int) (picture_rgb->data[0]
                                        + y * picture_rgb->linesize[0])[x] << " ";
                }
                output_file.close();
            }
            cnt++;
        }
        // FIX: av_free_packet() is deprecated; av_packet_unref() is the
        // supported way to release a packet's payload on FFmpeg 3.x/4.x.
        av_packet_unref(&packet);
        av_init_packet(&packet);
    }

    // FIX: AVFrames must be released with av_frame_free(), not av_free(),
    // otherwise their internal state leaks.
    av_frame_free(&picture);
    av_frame_free(&picture_rgb);
    av_free(picture_buffer);
    av_free(picture_buffer_2);

    av_read_pause(format_ctx);
    // output_ctx->pb was never opened with avio_open(), so the original
    // avio_close(output_ctx->pb) closed NULL; it is simply dropped.
    avformat_free_context(output_ctx);
    // FIX: release the decoder, the scaler and the input context as well.
    avcodec_free_context(&codec_ctx);
    sws_freeContext(img_convert_ctx);
    avformat_close_input(&format_ctx);
    avformat_network_deinit();

    return EXIT_SUCCESS;
}