iOS: How can I record video with both the front and back cameras and keep recording?

Tags: ios, objective-c, iphone, video, avfoundation

I'm using AVFoundation. I want to record video with both cameras (front and back). I record video with one camera, but when I switch the camera mode back to the front, the camera stays frozen. Is it possible to keep recording video continuously while switching between the two cameras?

Sample code:

- (void) startup
{
 if (_session == nil)
{
    NSLog(@"Starting up server");

    self.isCapturing = NO;
    self.isPaused = NO;
    _currentFile = 0;
    _discont = NO;

    // create capture device with video input
    _session = [[AVCaptureSession alloc] init];
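    // note: despite its name, this variable holds the front camera returned by -frontCamera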
    AVCaptureDevice *backCamera = [self frontCamera];

    AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:backCamera error:nil];
    [_session addInput:input];

    // audio input from default mic
    AVCaptureDevice* mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    AVCaptureDeviceInput* micinput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:nil];
    [_session addInput:micinput];

    // create an output for YUV output with self as delegate
    _captureQueue = dispatch_queue_create("com.softcraftsystems.comss", DISPATCH_QUEUE_SERIAL);
    AVCaptureVideoDataOutput* videoout = [[AVCaptureVideoDataOutput alloc] init];
    [videoout setSampleBufferDelegate:self queue:_captureQueue];
    NSDictionary* setcapSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                    [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange], kCVPixelBufferPixelFormatTypeKey,
                                    nil];
    videoout.videoSettings = setcapSettings;
    [_session addOutput:videoout];
    _videoConnection = [videoout connectionWithMediaType:AVMediaTypeVideo];
    // find the actual dimensions used so we can set up the encoder to the same.
    NSDictionary* actual = videoout.videoSettings;
    _cy = [[actual objectForKey:@"Height"] integerValue];
    _cx = [[actual objectForKey:@"Width"] integerValue];

    AVCaptureAudioDataOutput* audioout = [[AVCaptureAudioDataOutput alloc] init];
    [audioout setSampleBufferDelegate:self queue:_captureQueue];
    [_session addOutput:audioout];
    _audioConnection = [audioout connectionWithMediaType:AVMediaTypeAudio];
    // for audio, we want the channels and sample rate, but we can't get those from audioout.audiosettings on ios, so
    // we need to wait for the first sample

    // start capture and a preview layer
    [_session startRunning];

    _preview = [AVCaptureVideoPreviewLayer layerWithSession:_session];
    _preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
}
}

- (AVCaptureDevice *)frontCamera
{
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices) {
    if ([device position] == AVCaptureDevicePositionFront) {
        return device;
    }
}
return nil;
}

 - (AVCaptureDevice *)backCamera
 {
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices) {
    if ([device position] == AVCaptureDevicePositionBack) {
        return device;
    }
}
return nil;
}

- (void) startupFront
{
// stop the running session before discarding it (sending stopRunning after nilling _session is a no-op)
[_session stopRunning];
_session = nil;
if (_session == nil)
{
    NSLog(@"Starting up server");

    self.isCapturing = NO;
    self.isPaused = NO;
    _currentFile = 0;
    _discont = NO;

    // create capture device with video input
    _session = [[AVCaptureSession alloc] init];
    AVCaptureDevice *backCamera = [self backCamera];
    AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:backCamera error:nil];
    [_session addInput:input];

    // audio input from default mic
    AVCaptureDevice* mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    AVCaptureDeviceInput* micinput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:nil];
    [_session addInput:micinput];

    // create an output for YUV output with self as delegate
    _captureQueue = dispatch_queue_create("com.softcraftsystems.comss", DISPATCH_QUEUE_SERIAL);
    AVCaptureVideoDataOutput* videoout = [[AVCaptureVideoDataOutput alloc] init];
    [videoout setSampleBufferDelegate:self queue:_captureQueue];
    NSDictionary* setcapSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                    [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange], kCVPixelBufferPixelFormatTypeKey,
                                    nil];
    videoout.videoSettings = setcapSettings;
    [_session addOutput:videoout];
    _videoConnection = [videoout connectionWithMediaType:AVMediaTypeVideo];
    // find the actual dimensions used so we can set up the encoder to the same.
    NSDictionary* actual = videoout.videoSettings;
    _cy = [[actual objectForKey:@"Height"] integerValue];
    _cx = [[actual objectForKey:@"Width"] integerValue];

    AVCaptureAudioDataOutput* audioout = [[AVCaptureAudioDataOutput alloc] init];
    [audioout setSampleBufferDelegate:self queue:_captureQueue];
    [_session addOutput:audioout];
    _audioConnection = [audioout connectionWithMediaType:AVMediaTypeAudio];
    // for audio, we want the channels and sample rate, but we can't get those from audioout.audiosettings on ios, so
    // we need to wait for the first sample

    // start capture and a preview layer
    [_session startRunning];

    _preview = [AVCaptureVideoPreviewLayer layerWithSession:_session];
    _preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
}
}

- (void) startCapture
{
@synchronized(self)
{
    if (!self.isCapturing)
    {
        NSLog(@"starting capture");

        // create the encoder once we have the audio params
        _encoder = nil;
        self.isPaused = NO;
        _discont = NO;
        _timeOffset = CMTimeMake(0, 0);
        self.isCapturing = YES;
    }
}
}

 - (void) stopCapture
 {
@synchronized(self)
{
    if (self.isCapturing)
    {
        NSString* filename = [NSString stringWithFormat:@"capture%d.mp4", _currentFile];
        NSString* path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
        NSURL* url = [NSURL fileURLWithPath:path];
        _currentFile++;

        // serialize with audio and video capture

        self.isCapturing = NO;
        dispatch_async(_captureQueue, ^{
            [_encoder finishWithCompletionHandler:^{
                self.isCapturing = NO;
                _encoder = nil;
                ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
                [library writeVideoAtPathToSavedPhotosAlbum:url completionBlock:^(NSURL *assetURL, NSError *error){
                    NSLog(@"save completed");
                    [[NSFileManager defaultManager] removeItemAtPath:path error:nil];
                }];
            }];
        });
    }
}
 }

 - (void) pauseCapture
 {
@synchronized(self)
{
    if (self.isCapturing)
    {
        NSLog(@"Pausing capture");
        self.isPaused = YES;
        _discont = YES;
    }
}
 }

- (void) resumeCapture
{
@synchronized(self)
{
    if (self.isPaused)
    {
        NSLog(@"Resuming capture");
        self.isPaused = NO;
    }
}
}

- (CMSampleBufferRef) adjustTime:(CMSampleBufferRef) sample by:(CMTime) offset
{
CMItemCount count;
CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count);
CMSampleTimingInfo* pInfo = malloc(sizeof(CMSampleTimingInfo) * count);
CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count);
for (CMItemCount i = 0; i < count; i++)
{
    pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset);
    pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset);
}
CMSampleBufferRef sout;
CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout);
free(pInfo);
return sout;
 }

 - (void) setAudioFormat:(CMFormatDescriptionRef) fmt
 {
const AudioStreamBasicDescription *asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt);
_samplerate = asbd->mSampleRate;
_channels = asbd->mChannelsPerFrame;
 }

- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
BOOL bVideo = YES;

@synchronized(self)
{
    if (!self.isCapturing  || self.isPaused)
    {
        return;
    }
    if (connection != _videoConnection)
    {
        bVideo = NO;
    }
    if ((_encoder == nil) && !bVideo)
    {
        CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
        [self setAudioFormat:fmt];
        NSString* filename = [NSString stringWithFormat:@"capture%d.mp4", _currentFile];
        NSString* path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
        _encoder = [VideoEncoder encoderForPath:path Height:_cy width:_cx channels:_channels samples:_samplerate];
    }
    if (_discont)
    {
        if (bVideo)
        {
            return;
        }
        _discont = NO;
        // calc adjustment
        CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        CMTime last = bVideo ? _lastVideo : _lastAudio;
        if (last.flags & kCMTimeFlags_Valid)
        {
            if (_timeOffset.flags & kCMTimeFlags_Valid)
            {
                pts = CMTimeSubtract(pts, _timeOffset);
            }
            CMTime offset = CMTimeSubtract(pts, last);
            NSLog(@"Setting offset from %s", bVideo?"video": "audio");
            NSLog(@"Adding %f to %f (pts %f)", ((double)offset.value)/offset.timescale, ((double)_timeOffset.value)/_timeOffset.timescale, ((double)pts.value/pts.timescale));

            // this stops us having to set a scale for _timeOffset before we see the first video time
            if (_timeOffset.value == 0)
            {
                _timeOffset = offset;
            }
            else
            {
                _timeOffset = CMTimeAdd(_timeOffset, offset);
            }
        }
        _lastVideo.flags = 0;
        _lastAudio.flags = 0;
    }

    // retain so that we can release either this or modified one
    CFRetain(sampleBuffer);

    if (_timeOffset.value > 0)
    {
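        // drop our extra retain on the original buffer; adjustTime returns a new
        // buffer that we own (Create rule), and it is released at the end of this method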
        CFRelease(sampleBuffer);
        sampleBuffer = [self adjustTime:sampleBuffer by:_timeOffset];
    }

    // record most recent time so we know the length of the pause
    CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    CMTime dur = CMSampleBufferGetDuration(sampleBuffer);
    if (dur.value > 0)
    {
        pts = CMTimeAdd(pts, dur);
    }
    if (bVideo)
    {
        _lastVideo = pts;
    }
    else
    {
        _lastAudio = pts;
    }
}

// pass frame to encoder
[_encoder encodeFrame:sampleBuffer isVideo:bVideo];
CFRelease(sampleBuffer);
 }

 - (void) shutdown
 {
NSLog(@"shutting down server");
if (_session)
{
    [_session stopRunning];
    _session = nil;
}
[_encoder finishWithCompletionHandler:^{
    NSLog(@"Capture completed");
}];
 }
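
One direction that may avoid tearing the whole session down (and the frozen camera that comes with rebuilding it) is to swap only the video input on the already-running session, wrapped in beginConfiguration / commitConfiguration, and then refresh the cached video connection. The sketch below is only an untested illustration of that idea against the code above; the -switchToCameraAtPosition: helper is made up for this example, and it assumes the _session, _videoConnection, -frontCamera and -backCamera defined above.

// Untested sketch (not from the original post): swap the camera input on the
// live AVCaptureSession instead of recreating it, so the data-output delegate
// keeps receiving frames. -switchToCameraAtPosition: is a hypothetical helper.
- (void) switchToCameraAtPosition:(AVCaptureDevicePosition)position
{
    AVCaptureDevice* newCamera = (position == AVCaptureDevicePositionFront) ? [self frontCamera] : [self backCamera];
    if (newCamera == nil)
    {
        return;
    }
    NSError* error = nil;
    AVCaptureDeviceInput* newInput = [AVCaptureDeviceInput deviceInputWithDevice:newCamera error:&error];
    if (newInput == nil)
    {
        NSLog(@"could not create camera input: %@", error);
        return;
    }

    [_session beginConfiguration];

    // remove the current video input (the audio input is left untouched)
    for (AVCaptureInput* input in _session.inputs)
    {
        if ([input isKindOfClass:[AVCaptureDeviceInput class]] &&
            [((AVCaptureDeviceInput*)input).device hasMediaType:AVMediaTypeVideo])
        {
            [_session removeInput:input];
            break;
        }
    }
    if ([_session canAddInput:newInput])
    {
        [_session addInput:newInput];
    }

    [_session commitConfiguration];

    // the connection object can change after reconfiguration, so refresh the
    // reference that captureOutput:didOutputSampleBuffer:fromConnection: compares against
    for (AVCaptureOutput* output in _session.outputs)
    {
        if ([output isKindOfClass:[AVCaptureVideoDataOutput class]])
        {
            _videoConnection = [output connectionWithMediaType:AVMediaTypeVideo];
        }
    }
}

Whether the encoder keeps accepting the new frames without a visible discontinuity still has to be verified; in the worst case the pause/resume time-offset logic above may be needed around the switch.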