Objective-C: Recording an RTSP stream with ffmpeg in iOS


I have successfully streamed RTSP in a Swift project. The project also has a recording feature: it basically uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an MP4 file.
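For context, the usual shape of that write path is roughly the following. This is only a minimal sketch against the FFmpeg 2.8 C API, not the project's actual code; out_fc is assumed to be an output AVFormatContext whose streams are already set up and whose pb is open, and read_next_packet is a hypothetical packet source standing in for the RTSP read loop:

#include <libavformat/avformat.h>

// Minimal sketch of the mux flow described above (not the project's code).
// 'out_fc' is an output AVFormatContext with its streams configured and its pb
// opened; 'read_next_packet' is a hypothetical source of packets whose pts/dts
// are already expressed in the output stream's time_base.
static int mux_sketch(AVFormatContext *out_fc, int (*read_next_packet)(AVPacket *pkt))
{
    int ret = avformat_write_header(out_fc, NULL);   // write the container header
    if (ret < 0)
        return ret;

    AVPacket pkt;
    av_init_packet(&pkt);
    while (read_next_packet(&pkt) >= 0) {
        ret = av_interleaved_write_frame(out_fc, &pkt);
        av_free_packet(&pkt);                        // av_packet_unref() in newer FFmpeg
        if (ret < 0)
            return ret;
    }

    return av_write_trailer(out_fc);                 // finalize the file
}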

When I run the project on my device, the RTSP streaming works fine, but the recording feature always produces a blank MP4 file with no video and no audio.

Can anyone tell me which step I am missing?

I am using an iPhone 5 with iOS 9.1 and Xcode 7.1.1. ffmpeg is version 2.8.3, built following the compile instructions.

Below is the sample code in the project.

The function that produces each frame:

-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;

while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {

        // 20130525 albert.liao modified start

        // Initialize a new format context for writing file
        if(veVideoRecordState!=eH264RecIdle)
        {
            switch(veVideoRecordState)
            {
                case eH264RecInit:
                {                        
                    if ( !pFormatCtx_Record )
                    {
                        int bFlag = 0;
                        //NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
                        NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";

                        const char *file = [videoPath UTF8String];
                        pFormatCtx_Record = avformat_alloc_context();
                        bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );

                        if(bFlag==true)
                        {
                            veVideoRecordState = eH264RecActive;
                            fprintf(stderr, "h264_file_create success\n");                                
                        }
                        else
                        {
                            veVideoRecordState = eH264RecIdle;
                            fprintf(stderr, "h264_file_create error\n");
                        }
                    }
                }
                //break;

                case eH264RecActive:
                {
                    if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
                    {
                        bFirstIFrame=true;
                        vPTS = packet.pts ;
                        vDTS = packet.dts ;
#if 0
                        NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
                        [pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
                        [NSTimer scheduledTimerWithTimeInterval:5.0//2.0
                                                         target:self
                                                       selector:@selector(StopRecording:)
                                                       userInfo:nil
                                                        repeats:NO];
#endif
                    }

                    // Record audio when 1st i-Frame is obtained
                    if(bFirstIFrame==true)
                    {
                        if ( pFormatCtx_Record )
                        {
#if PTS_DTS_IS_CORRECT==1
                            packet.pts = packet.pts - vPTS;
                            packet.dts = packet.dts - vDTS;

#endif
                                h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);

                        }
                        else
                        {
                            NSLog(@"pFormatCtx_Record no exist");
                        }
                    }
                }
                break;

                case eH264RecClose:
                {
                    if ( pFormatCtx_Record )
                    {
                        h264_file_close(pFormatCtx_Record);
#if 0
                        // 20130607 Test
                        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
                        {
                            ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
                            NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
                            NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
                            if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
                            {
                                [library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
                                    if (error) {
                                        // TODO: error handling
                                        NSLog(@"writeVideoAtPathToSavedPhotosAlbum error");
                                    } else {
                                        // TODO: success handling
                                        NSLog(@"writeVideoAtPathToSavedPhotosAlbum success");
                                    }
                                }];
                            }
                            [library release];
                        });
#endif
                        vPTS = 0;
                        vDTS = 0;
                        vAudioPTS = 0;
                        vAudioDTS = 0;
                        pFormatCtx_Record = NULL;
                        NSLog(@"h264_file_close() is finished");
                    }
                    else
                    {
                        NSLog(@"fc no exist");
                    }
                    bFirstIFrame = false;
                    veVideoRecordState = eH264RecIdle;

                }
                break;

                default:
                    if ( pFormatCtx_Record )
                    {
                        h264_file_close(pFormatCtx_Record);
                        pFormatCtx_Record = NULL;
                    }
                    NSLog(@"[ERROR] unexpected veVideoRecordState!!");
                    veVideoRecordState = eH264RecIdle;
                    break;
            }
        }

        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    }
    else if(packet.stream_index==audioStream)
    {
        // 20131024 albert.liao modfied start
        static int vPktCount=0;
        BOOL bIsAACADTS = FALSE;
        int ret = 0;

        if(aPlayer.vAACType == eAAC_UNDEFINED)
        {
            tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
            bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
        }

        @synchronized(aPlayer)
        {
            if(aPlayer==nil)
            {
                aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
                NSLog(@"aPlayer initAudio");

                if(bIsAACADTS)
                {
                    aPlayer.vAACType = eAAC_ADTS;
                    //NSLog(@"is ADTS AAC");
                }
            }
            else
            {
                if(vPktCount<5) // The voice is listened once image is rendered
                {
                    vPktCount++;
                }
                else
                {
                    if([aPlayer getStatus]!=eAudioRunning)
                    {
                        dispatch_async(dispatch_get_main_queue(), ^(void) {
                            @synchronized(aPlayer)
                            {
                                NSLog(@"aPlayer start play");
                                [aPlayer Play];
                            }

                        });
                    }
                }
            }
        };

        @synchronized(aPlayer)
        {
            int ret = 0;

            ret = [aPlayer putAVPacket:&packet];
            if(ret <= 0)
                NSLog(@"Put Audio Packet Error!!");

        }

        // 20131024 albert.liao modfied end

        if(bFirstIFrame==true)
        {
            switch(veVideoRecordState)
            {
                case eH264RecActive:
                {
                    if ( pFormatCtx_Record )
                    {
                        h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);

                    }
                    else
                    {
                        NSLog(@"pFormatCtx_Record no exist");
                    }
                }
            }
        }
    }
    else
    {
        //fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",\
                packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
    }
    // 20130525 albert.liao modified end
}
return frameFinished!=0;
}
av_write_trailer:

void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
    return;

av_write_trailer( fc );


if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
    avio_close( fc->pb );

av_free( fc );
}

Thanks.

It looks like you are using the same AVFormatContext for both the input and the output.

In the line

pst = fc->streams[ vStreamIdx ];

you are assigning the AVStream* from the AVFormatContext that is connected to the input (the RTSP stream). But later you try to write packets back to that very same context with av_interleaved_write_frame(fc, &pkt). I find it helps to think of a context as one file; that makes this kind of thing easier to navigate. I do something similar to what you are doing (not on iOS), and there I use a separate AVFormatContext for each of the input (the RTSP stream) and the output (the MP4 file). If I am right, I think you just need to initialize a second AVFormatContext and set it up properly.

The code below (without any error checking) is what I use to set up the
AVFormatContext *output_format_context = NULL
alongside the
AVFormatContext *input_format_context
associated with the RTSP stream, and to write from one to the other. This happens after I have grabbed a packet, etc.; in your case it looks like you are filling the packet yourself (I simply take the packet from av_read_frame and repackage it).

Here is the code that would likely be in your write-frame function (but it also includes writing the header):

AVFormatContext * output_format_context;
AVStream * in_stream_2;
AVStream * out_stream_2;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
output_format = output_format_context->oformat;
// Loop through all streams
for (i = 0; i < input_format_context->nb_streams; i++) {
    // Create a pointer to the input stream that was allocated earlier in the code
    AVStream *in_stream = input_format_context->streams[i];
    // Create a pointer to a new stream that will be part of the output
    AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
    // Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
    out_stream->time_base = in_stream->time_base;
    // This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
    avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
    // I don't remember what this is for :)
    out_stream->codec->codec_tag = 0;
    // This just sets a flag from the format context to the stream relating to the header
    if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
         out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check the NOFILE flag and open the output file context (previously the output file was only associated with the format context, now it is actually opened).
if (!(output_format->flags & AVFMT_NOFILE))
    avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context, NULL);
// Re-get the appropriate streams that were populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescale pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts, so this just protects that first one for my purposes. You probably don't need it.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write trailer, close/cleanup... etc
// ....
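The cleanup that the last comment alludes to is not shown in the answer; a minimal sketch (assuming the same output_format_context as above) might be:

#include <libavformat/avformat.h>

// Sketch of the finalization step (same assumed output_format_context as above).
// Skipping av_write_trailer() typically leaves an MP4 without its moov atom,
// i.e. a file that plays back as blank - the symptom described in the question.
static void finish_output(AVFormatContext *output_format_context)
{
    av_write_trailer(output_format_context);
    if (!(output_format_context->oformat->flags & AVFMT_NOFILE))
        avio_close(output_format_context->pb);
    avformat_free_context(output_format_context);
}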
This code is very bare bones and does not include the setup (which it sounds like you are doing correctly anyway). I can also imagine it could be cleaned up and tweaked to suit your needs, but it does work for me for re-writing an RTSP stream into a file (many files in my case, but that code is not shown).

The code is C code, so you might have to make some minor tweaks to make it Swift compatible (for some of the library function calls, maybe). I think overall it should be compatible, though.


Hopefully this helps point you in the right direction. It was pieced together thanks to several example-code sources (I don't remember where from), along with warning hints from the libraries themselves.

Did you find a solution? Please suggest it to me. If you did, please update with the code... I am also looking for the same solution.