Visual c++ 用ffmpeg输出RTSP流

Visual c++ 用ffmpeg输出RTSP流,visual-c++,ffmpeg,rtsp,wowza,Visual C++,Ffmpeg,Rtsp,Wowza,我试图使用ffmpeg库将视频流从我的应用程序发送到媒体服务器(在本例中为wowza)。我已经能够做相反的事情并使用RTSP流,但是我在编写RTSP流时遇到了一些问题 我找到了一些例子,并试图利用相关的BIT。代码如下。我已经尽可能地简化了它。我只想向wowza服务器发送一个H264位的流,它可以处理这个流 当我尝试发送数据包时,每当在av_交错_write_帧函数中出现“整数除零”异常。异常似乎与未正确设置数据包时间戳有关。我尝试了不同的值,可以通过设置一些人为的值来克服异常,但写入调用失败

我试图使用ffmpeg库将视频流从我的应用程序发送到媒体服务器(在本例中为wowza)。我已经能够做相反的事情并使用RTSP流,但是我在编写RTSP流时遇到了一些问题

我找到了一些例子,并试图利用其中相关的部分。代码如下。我已经尽可能地简化了它。我只想向 wowza 服务器发送一个 H264 编码的视频流,服务器可以处理这个流

当我尝试发送数据包时,av_interleaved_write_frame 函数中总会出现“整数除零”(integer division by zero)异常。该异常似乎与数据包时间戳未正确设置有关。我尝试了不同的值,通过设置一些人为的值可以避开这个异常,但写入调用仍然失败

#include <iostream>
#include <fstream>
#include <sstream>
#include <cstring>

#include "stdafx.h"
#include "windows.h"

extern "C"
{
    #include <libavcodec\avcodec.h>
    #include <libavformat\avformat.h>
    #include <libavformat\avio.h>
    #include <libswscale\swscale.h>
}

using namespace std;

static int video_is_eof;

#define STREAM_DURATION   50.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */
#define VIDEO_CODEC_ID CODEC_ID_H264

static int sws_flags = SWS_BICUBIC;

/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

/* Rescale a packet's timestamps from the codec time base (*time_base) to
 * the stream time base, tag it with the stream index and hand it to the
 * interleaving muxer.  Returns the muxer's result code. */
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    const AVRounding rnd = AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    /* Convert pts/dts/duration into the stream's clock. */
    pkt->pts      = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, rnd);
    pkt->dts      = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, rnd);
    pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);

    pkt->stream_index = st->index;

    /* NOTE(review): the reported "integer division by zero" fires here when
     * st->time_base is still {0,0}, i.e. avformat_write_header was never
     * called before sending packets. */
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

/* Add an output stream. */
/* Add an output stream for codec_id to the muxer context and configure its
 * encoder context with the example's fixed parameters (CIF @ 25 fps).
 * Exits the process on failure, matching the surrounding example style. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n", avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }

    st->id = oc->nb_streams - 1;

    /* FIX: give the stream a valid time base up front.  Leaving it {0,0}
     * (and never calling avformat_write_header) is what produces the
     * "integer division by zero" inside av_interleaved_write_frame. */
    st->time_base.num = 1;
    st->time_base.den = STREAM_FRAME_RATE;

    c = st->codec;
    c->codec_id = codec_id;
    c->bit_rate = 400000;                  /* target ~400 kbit/s */
    c->width = 352;                        /* CIF resolution */
    c->height = 288;
    c->time_base.den = STREAM_FRAME_RATE;  /* encoder ticks: 1/25 s per frame */
    c->time_base.num = 1;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;

    /* Some formats want stream headers to be separate (per the official
     * muxing example). */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/* Open the encoder attached to *st and allocate the reusable global frame
 * plus its backing picture buffer (dst_picture).  Exits on any failure. */
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c = st->codec;

    /* Open the encoder with default options. */
    int ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: ");
        exit(1);
    }

    /* One frame object is reused for every encode call. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* Buffer the encoder reads from; filled by fill_yuv_image(). */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: ");
        exit(1);
    }

    /* Point the frame's data/linesize members at the allocated planes. */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
/* Prepare a dummy image: a diagonal colour gradient whose phase advances
 * with frame_index so successive frames visibly animate (same pattern as
 * the ffmpeg muxing example). */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
    const int t = frame_index;
    int x, y;

    /* Luma plane: full resolution. */
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            pict->data[0][y * pict->linesize[0] + x] = x + y + t * 3;
        }
    }

    /* Chroma planes: 4:2:0, so half resolution in both directions. */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + t * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + t * 5;
        }
    }
}

/* Encode one synthetic frame (or drain the encoder when flush != 0) and
 * pass any resulting packet to write_frame().  Sets the global
 * video_is_eof once the encoder is fully drained.  Exits on errors. */
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    AVCodecContext *c = st->codec;
    int got_packet = 0;
    int ret;

    /* Generate fresh picture content unless we are only draining. */
    if (!flush)
        fill_yuv_image(&dst_picture, frame_count, c->width, c->height);

    AVPacket pkt = { 0 };
    av_init_packet(&pkt);

    /* Encode; a NULL frame tells the encoder to flush delayed output. */
    frame->pts = frame_count;
    ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame:");
        exit(1);
    }

    if (got_packet) {
        /* A packet came out: rescale its timestamps and mux it. */
        ret = write_frame(oc, &c->time_base, st, &pkt);
    } else {
        /* Nothing produced: either buffered, or fully drained on flush. */
        if (flush)
            video_is_eof = 1;
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: ");
        exit(1);
    }
    frame_count++;
}

/* Release everything open_video() allocated: close the encoder, free the
 * picture buffers and the reusable frame.  src_picture is never allocated
 * in this example, but av_free(NULL) is a safe no-op. */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_frame_free(&frame);
}

int _tmain(int argc, _TCHAR* argv[])
{
    printf("starting...\n");

    const char *filename = "rtsp://test:password@192.168.33.19:1935/ffmpeg/0";

    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *video_st;
    AVCodec *video_codec;
    double video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();
    avformat_network_init();

    AVOutputFormat* oFmt = av_oformat_next(NULL);
    while (oFmt) {
        if (oFmt->video_codec == VIDEO_CODEC_ID) {
            break;
        }
        oFmt = av_oformat_next(oFmt);
    }

    if (!oFmt) {
        printf("Could not find the required output format.\n");
        exit(1);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, oFmt, "rtsp", filename);

    if (!oc) {
        printf("Could not set the output media context.\n");
        exit(1);
    }

    fmt = oc->oformat;
    if (!fmt) {
        printf("Could not create the output format.\n");
        exit(1);
    }

    video_st = NULL;

    cout << "Codec = " << avcodec_get_name(fmt->video_codec) << endl;
    if (fmt->video_codec != AV_CODEC_ID_NONE)
    {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    }

    /* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
    if (video_st) {
        open_video(oc, video_codec, video_st);
    }

    av_dump_format(oc, 0, filename, 1);
    char errorBuff[80];

    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open outfile '%s': %s", filename, av_make_error_string(errorBuff, 80, ret));
            return 1;
        }
    }

    flush = 0;
    while (video_st && !video_is_eof) {
        /* Compute current video time. */
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush && (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        if (video_st && !video_is_eof) {
            write_video_frame(oc, video_st, flush);
        }
    }

    if (video_st) {
        close_video(oc, video_st);
    }

    if ((fmt->flags & AVFMT_NOFILE)) {
        avio_close(oc->pb);
    }

    avformat_free_context(oc);

    printf("finished.\n");

    getchar();

    return 0;
}
#包括
#包括
#包括
#包括
#包括“stdafx.h”
#包括“windows.h”
外部“C”
{
#包括
#包括
#包括
#包括
}
使用名称空间std;
静态int-video_是_-eof;
#定义流持续时间50.0
#定义流/帧/速率25/*25图像/秒*/
#定义流\u PIX\u FMT AV\u PIX\u FMT\u YUV420P/*默认PIX\u FMT*/
#定义视频\u编解码器\u ID编解码器\u ID\u H264
静态int sws_标志=sws_双三次;
/*视频输出*/
静态AVFrame*帧;
静态AVPicture src_图片、dst_图片;
静态整数帧计数;
静态整数写入帧(AVFormatContext*fmt\u ctx,const AVRational*time\u base,AVStream*st,AVPacket*pkt)
{
/*将输出数据包时间戳值从编解码器重新缩放到流时基*/
pkt->pts=av_rescale_q_rnd(pkt->pts,*时基,st->时基,AVRounding(av_ROUND_INF附近| av_ROUND_PASS_MINMAX));
pkt->dts=av_rescale_rnd(pkt->dts,*时基,st->时基,AVRounding(av_ROUND_NEAR_INF | av_ROUND_PASS_MINMAX));
pkt->duration=av\u rescale\u q(pkt->duration,*时基,st->时基);
pkt->stream_index=st->index;
//这里发生异常。
返回av_交织_写入_帧(fmt_ctx,pkt);
}
/*添加一个输出流*/
静态AVStream*添加_流(AVFormatContext*oc,AVCodec**codec,enum AVCodecID codec_id)
{
avc*c;
AVStream*st;
/*找到编码器*/
*codec=avcodec\u find\u编码器(codec\u id);
如果(!(*编解码器)){
fprintf(stderr,“找不到“%s”的编码器”,avcodec_get_name(codec_id));
出口(1);
}
st=avformat\ U new\ U流(oc,*编解码器);
如果(!st){
fprintf(stderr,“无法分配流”\n);
出口(1);
}
st->id=oc->nb\U流-1;
c=st->codec;
c->codec_id=codec_id;
c->比特率=400000;
c->宽度=352;
c->高度=288;
c->time\u base.den=流\u帧\u速率;
c->time_base.num=1;
c->gop_size=12;/*最多每12帧发射一帧内帧*/
c->pix_fmt=流_pix_fmt;
返回st;
}
静态void open_视频(AVFormatContext*oc、AVCodec*codec、AVStream*st)
{
int ret;
AVCodecContext*c=st->codec;
/*打开编解码器*/
ret=avcodec_open2(c,codec,NULL);
如果(ret<0){
fprintf(stderr,“无法打开视频编解码器:”);
出口(1);
}
/*分配并初始化一个可重用的帧*/
frame=av_frame_alloc();
如果(!帧){
fprintf(stderr,“无法分配视频帧”\n);
出口(1);
}
帧->格式=c->pix\U fmt;
框架->宽度=c->宽度;
框架->高度=c->高度;
/*分配已编码的原始图片*/
ret=avpicture_alloc(和dst_picture,c->pix_fmt,c->width,c->height);
如果(ret<0){
fprintf(stderr,“无法分配图片:”);
出口(1);
}
/*将数据和线宽图片指针复制到帧*/
*((AVPicture*)帧)=dst_图片;
}
/*准备一个虚拟图像*/
静态空白填充图像(AVPicture*pict,整数帧索引,整数宽度,整数高度)
{
int x,y,i;
i=帧索引;
/*Y*/
对于(y=0;y<高度;y++)
对于(x=0;xdata[0][y*pict->linesize[0]+x]=x+y+i*3;
/*Cb和Cr*/
对于(y=0;ydata[1][y*pict->linesize[1]+x]=128+y+i*2;
pict->data[2][y*pict->linesize[2]+x]=64+x+i*5;
}
}
}
静态无效写入视频帧(AVFormatContext*oc、AVStream*st、int flush)
{
int ret;
AVCodecContext*c=st->codec;
如果(!刷新){
填充图像(&dst图像,帧数,c->宽度,c->高度);
}
AVPacket pkt={0};
int-got_包;
av_初始_数据包(&pkt);
/*对图像进行编码*/
帧->pts=帧计数;
ret=avcodec\u encode\u video2(c,&pkt,flush?NULL:frame,&got\u数据包);
如果(ret<0){
fprintf(stderr,“视频帧错误编码”);
出口(1);
}
/*如果大小为零,则表示图像已缓冲*/
如果(得到_数据包){
ret=写入帧(oc和c->时基、st和pkt);
}
否则{
如果(齐平){
视频_为_eof=1;
}
ret=0;
}
如果(ret<0){
fprintf(stderr,“写入视频帧时出错:”);
出口(1);
}
帧计数++;
}
静态无效关闭_视频(AVFormatContext*oc、AVStream*st)
{
avcodec_关闭(st->codec);
av_免费(src_图片数据[0]);
无AVU(dst_图片数据[0]);
av_无帧(&帧);
}
int _tmain(int argc,_TCHAR*argv[]
{
printf(“开始…\n”);
常量字符*文件名=”rtsp://test:password@192.168.33.19:1935/ffmpeg/0”;
AVOutputFormat*fmt;
AVFormatContext*oc;
AVStream*视频;
AVCodec*视频编解码器;
双视频时间;
内平流,内平流;
/*初始化libavcodec,并注册所有编解码器和格式*/
av_寄存器_all();
avformat_network_init();
AVOutputFormat*oFmt=av\U OFFORMAT\U next(空);
while(oFmt){
如果(oFmt->video\u codec==video\u codec\u ID){
打破
}
oFmt=下一个格式的AVU(oFmt);
video_st->pts.val += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
// Roughly based on: https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html

#include <chrono>
#include <thread>
#include <tchar.h>

extern "C"
{
    #include <libavcodec\avcodec.h>
    #include <libavformat\avformat.h>
    #include <libavformat\avio.h>
    #include <libswscale\swscale.h>
    #include <libavutil\time.h>
}

#pragma comment(lib,"libavformat/libavformat.a")
#pragma comment(lib,"libavcodec/libavcodec.a")
#pragma comment(lib,"libavutil/libavutil.a")
#pragma comment(lib,"libswscale/libswscale.a")
#pragma comment(lib,"x264.lib")
#pragma comment(lib,"libswresample/libswresample.a")

using namespace std;

static int video_is_eof;

#define STREAM_DURATION   20
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT   AV_PIX_FMT_YUV420P /* default pix_fmt */ //AV_PIX_FMT_NV12;
#define VIDEO_CODEC_ID CODEC_ID_H264

/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;

/* Add an output stream. */
/* Add an output stream for codec_id to the muxer context.
 * Returns the new stream on success, NULL on failure. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st = NULL;  /* FIX: was uninitialized; the encoder-not-found
                             path returned an indeterminate pointer (UB). */

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        av_log(NULL, AV_LOG_ERROR, "Could not find encoder for '%s'.\n", avcodec_get_name(codec_id));
    }
    else {
        st = avformat_new_stream(oc, *codec);
        if (!st) {
            av_log(NULL, AV_LOG_ERROR, "Could not allocate stream.\n");
        }
        else {
            st->id = oc->nb_streams - 1;
            /* RTP/RTSP uses a 90 kHz clock; pre-set the stream time base. */
            st->time_base.den = st->pts.den = 90000;
            st->time_base.num = st->pts.num = 1;

            c = st->codec;
            c->codec_id = codec_id;
            c->bit_rate = 400000;
            c->width = 352;
            c->height = 288;
            c->time_base.den = STREAM_FRAME_RATE;
            c->time_base.num = 1;
            c->gop_size = 12; /* emit one intra frame every twelve frames at most */
            c->pix_fmt = STREAM_PIX_FMT;
        }
    }

    return st;
}

/* Open the encoder for *st and allocate the reusable frame plus its
 * picture buffer (dst_picture).
 * Returns 0 on success, a negative value on failure. */
static int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        /* FIX: the format string had no %s for the codec-name argument,
         * which is undefined behaviour in the printf family. */
        av_log(NULL, AV_LOG_ERROR, "Could not open video codec %s.\n", avcodec_get_name(c->codec_id));
    }
    else {

        /* allocate and init a re-usable frame */
        frame = av_frame_alloc();
        if (!frame) {
            av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
            ret = -1;
        }
        else {
            frame->format = c->pix_fmt;
            frame->width = c->width;
            frame->height = c->height;

            /* Allocate the encoded raw picture. */
            ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Could not allocate picture.\n");
                av_frame_free(&frame);  /* FIX: don't leak the frame on failure */
            }
            else {
                /* copy data and linesize picture pointers to frame */
                *((AVPicture *)frame) = dst_picture;
            }
        }
    }

    return ret;
}

/* Prepare a dummy image. */
/* Fill pict with a synthetic YUV420P test pattern that shifts with
 * frame_index, so successive frames visibly animate. */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
    int x, y;

    /* Luma: diagonal gradient drifting 3 levels per frame. */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + frame_index * 3;

    /* Chroma: quarter-size planes for 4:2:0 subsampling. */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + frame_index * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + frame_index * 5;
        }
    }
}

/* Encode one synthetic frame (pts = frameCount in the codec time base)
 * and write the resulting packet to the muxer.
 * Returns 0 on success (or when the encoder merely buffered the frame),
 * a negative value on encode/write failure. */
static int write_video_frame(AVFormatContext *oc, AVStream *st, int frameCount)
{
    int ret = 0;
    AVCodecContext *c = st->codec;

    fill_yuv_image(&dst_picture, frameCount, c->width, c->height);

    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);

    /* encode the image */
    frame->pts = frameCount;
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error encoding video frame.\n");
    }
    else {
        if (got_packet) {
            pkt.stream_index = st->index;
            const AVRounding rnd = AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
            pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, rnd);
            /* FIX: dts and duration must be rescaled too, otherwise they stay
             * in the codec time base and can confuse the muxer/server. */
            pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, rnd);
            pkt.duration = (int)av_rescale_q(pkt.duration, c->time_base, st->time_base);

            ret = av_write_frame(oc, &pkt);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while writing video frame.\n");
            }

            /* FIX: av_write_frame does not take ownership of the packet, so
             * the encoded data was leaked once per frame. */
            av_free_packet(&pkt);
        }
    }

    return ret;
}

/* Entry point: open an RTSP output to the server, stream STREAM_DURATION
 * seconds of synthetic H.264 video paced at STREAM_FRAME_RATE, then tear
 * the session down. */
int _tmain(int argc, _TCHAR* argv[])
{
    printf("starting...\n");

    const char *url = "rtsp://test:password@192.168.33.19:1935/ffmpeg/0";
    //const char *url = "rtsp://192.168.33.19:1935/ffmpeg/0";

    AVFormatContext *outContext;
    AVStream *video_st;
    AVCodec *video_codec;
    int ret = 0, frameCount = 0;

    av_log_set_level(AV_LOG_DEBUG);
    //av_log_set_level(AV_LOG_TRACE);

    av_register_all();
    avformat_network_init();

    avformat_alloc_output_context2(&outContext, NULL, "rtsp", url);

    if (!outContext) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
        goto end;
    }

    if (!outContext->oformat) {
        av_log(NULL, AV_LOG_FATAL, "Could not create the output format for '%s'.\n", url);
        goto end;
    }

    video_st = add_stream(outContext, &video_codec, VIDEO_CODEC_ID);

    /* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
    if (video_st) {
        av_log(NULL, AV_LOG_DEBUG, "Video stream codec %s.\n ", avcodec_get_name(video_st->codec->codec_id));

        ret = open_video(outContext, video_codec, video_st);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Open video stream failed.\n");
            goto end;
        }
    }
    else {
        av_log(NULL, AV_LOG_FATAL, "Add video stream for the codec '%s' failed.\n", avcodec_get_name(VIDEO_CODEC_ID));
        goto end;
    }

    av_dump_format(outContext, 0, url, 1);

    ret = avformat_write_header(outContext, NULL);
    if (ret != 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to connect to RTSP server for '%s'.\n", url);
        goto end;
    }

    printf("Press any key to start streaming...\n");
    getchar();

    /* FIX: the chrono locals live in their own scope so the `goto end`
     * statements above no longer jump over their initialisation
     * (ill-formed C++; MSVC error C2362). */
    {
        auto startSend = std::chrono::system_clock::now();

        while (video_st) {
            frameCount++;
            auto startFrame = std::chrono::system_clock::now();

            ret = write_video_frame(outContext, video_st, frameCount);

            if (ret < 0) {
                /* FIX: dropped the stray `url` argument — the format string
                 * has no conversion for it. */
                av_log(NULL, AV_LOG_ERROR, "Write video frame failed.\n");
                goto end;
            }

            auto streamDuration = std::chrono::duration_cast<chrono::milliseconds>(std::chrono::system_clock::now() - startSend).count();

            /* FIX: count() is long long and pts.val is int64_t — printing
             * them with %ld is undefined behaviour on LLP64 Windows. */
            printf("Elapsed time %lldms, video stream pts %lld.\n", (long long)streamDuration, (long long)video_st->pts.val);

            if (streamDuration / 1000.0 > STREAM_DURATION) {
                break;
            }
            else {
                auto frameDuration = std::chrono::duration_cast<chrono::milliseconds>(std::chrono::system_clock::now() - startFrame).count();
                long sleepMs = (long)(1000.0 / STREAM_FRAME_RATE - frameDuration);
                if (sleepMs > 0) {  /* FIX: never sleep for a negative duration */
                    std::this_thread::sleep_for(std::chrono::milliseconds(sleepMs));
                }
            }
        }
    }

    /* FIX: finalize the stream (RTSP TEARDOWN) before closing the codec. */
    av_write_trailer(outContext);

    if (video_st) {
        avcodec_close(video_st->codec);
        av_free(src_picture.data[0]);
        av_free(dst_picture.data[0]);
        av_frame_free(&frame);
    }

    avformat_free_context(outContext);

end:
    printf("finished.\n");

    getchar();

    return 0;
}