iOS - How to process the frames of an existing video in Swift


Currently, I'm trying to process the frames of an existing video using OpenCV. Are there any AV reader libraries that provide delegate methods for processing frames while a video is playing? I know how to process frames during a live AVCaptureSession using AVCaptureVideoDataOutput and the captureOutput delegate method. Is there something similar for video playback?


Any help would be appreciated.

AVAssetReader / AVAssetReaderOutput are what you need. Check out the copyNextSampleBuffer method.
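For readers who want the shape of that read loop, here is a minimal sketch; the readFrames function name and the 32BGRA pixel format are illustrative choices, not part of the original answer:

import AVFoundation

func readFrames(from asset: AVAsset) {
    guard let reader = try? AVAssetReader(asset: asset),
          let track = asset.tracks(withMediaType: .video).first else { return }
    let output = AVAssetReaderTrackOutput(track: track, outputSettings:
        [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)])
    reader.add(output)
    reader.startReading()
    // copyNextSampleBuffer() hands back one decoded frame at a time
    // and returns nil once the track is exhausted.
    while let sampleBuffer = output.copyNextSampleBuffer() {
        _ = sampleBuffer // process the CMSampleBuffer here
    }
}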


You can use AVVideoComposition. If you want to process the frames with Core Image, you can create an instance by calling the init(asset:applyingCIFiltersWithHandler:) initializer.
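A minimal sketch of that approach, assuming a bundled video.mp4 and a Gaussian blur as a placeholder filter:

import AVFoundation
import CoreImage

let asset = AVAsset(url: Bundle.main.url(forResource: "video", withExtension: "mp4")!)
let blur = CIFilter(name: "CIGaussianBlur")!

let composition = AVVideoComposition(asset: asset) { request in
    // request.sourceImage is the current frame as a CIImage.
    blur.setValue(request.sourceImage.clampedToExtent(), forKey: kCIInputImageKey)
    blur.setValue(8.0, forKey: kCIInputRadiusKey)
    let output = blur.outputImage!.cropped(to: request.sourceImage.extent)
    // Hand the filtered frame back to the pipeline.
    request.finish(with: output, context: nil)
}

// Attach the composition to a player item to see the effect during playback.
let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition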

Alternatively, you can create a custom compositor by implementing the AVVideoCompositing protocol:

Custom video compositors are provided with pixel buffers for each of their video sources during playback and other operations, and can perform arbitrary graphics operations on them in order to produce visual output.

For more information, see the AVVideoCompositing documentation.
You can also find an example (though the example is in Objective-C).
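A minimal sketch of a custom compositor, assuming 32BGRA buffers; a real implementation would draw into a new buffer rather than pass the source frame through:

import AVFoundation
import CoreVideo

final class PassthroughCompositor: NSObject, AVVideoCompositing {
    let sourcePixelBufferAttributes: [String : Any]? =
        [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    let requiredPixelBufferAttributesForRenderContext: [String : Any] =
        [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]

    func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
        // Called when the render size or other context attributes change.
    }

    func startRequest(_ request: AVAsynchronousVideoCompositionRequest) {
        // Fetch the pixel buffer of the first source track; this is where
        // the arbitrary graphics work on the frame would happen.
        guard let trackID = request.sourceTrackIDs.first?.int32Value,
              let frame = request.sourceFrame(byTrackID: trackID) else {
            request.finishCancelledRequest()
            return
        }
        request.finish(withComposedVideoFrame: frame)
    }
}

The compositor is installed by setting customVideoCompositorClass on an AVMutableVideoComposition before playback or export.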

For anyone who needs to process video frames with OpenCV:

Decoding the video:

import AVFoundation

@objc public protocol ARVideoReaderDelegate : NSObjectProtocol {
    func reader(_ reader: ARVideoReader!, newFrameReady sampleBuffer: CMSampleBuffer?, _ frameCount: Int)
    func readerDidFinished(_ reader: ARVideoReader!, totalFrameCount: Int)
}

@objc open class ARVideoReader: NSObject {
    var _asset: AVURLAsset!
    @objc var _delegate: ARVideoReaderDelegate?

    @objc public init!(urlAsset asset: AVURLAsset) {
        _asset = asset
        super.init()
    }

    @objc open func startReading() {
        guard let reader = try? AVAssetReader(asset: _asset),
              let videoTrack = _asset.tracks(withMediaType: .video).first else { return }

        // Decode to 32BGRA so each frame maps directly onto an OpenCV Mat.
        let options = [kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32BGRA)]
        let readerOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: options)
        reader.add(readerOutput)
        reader.startReading()

        var count = 0
        // copyNextSampleBuffer() returns nil once the track is exhausted,
        // so the loop ends without handing a nil buffer to the delegate.
        while reader.status == .reading, let sampleBuffer = readerOutput.copyNextSampleBuffer() {
            _delegate?.reader(self, newFrameReady: sampleBuffer, count)
            count += 1
        }
        _delegate?.readerDidFinished(self, totalFrameCount: count)
    }
}
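A hypothetical caller for the class above (FrameProcessor and videoURL are illustrative, not part of the original answer):

final class FrameProcessor: NSObject, ARVideoReaderDelegate {
    func reader(_ reader: ARVideoReader!, newFrameReady sampleBuffer: CMSampleBuffer?, _ frameCount: Int) {
        // Forward the buffer to OpenCV here, e.g. via an Objective-C++ wrapper.
    }
    func readerDidFinished(_ reader: ARVideoReader!, totalFrameCount: Int) {
        print("done, \(totalFrameCount) frames")
    }
}

let processor = FrameProcessor()
let videoReader = ARVideoReader(urlAsset: AVURLAsset(url: videoURL)) // videoURL: a file URL you provide
videoReader._delegate = processor
// startReading() blocks while it decodes, so run it off the main thread.
DispatchQueue.global().async { videoReader.startReading() }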
In the delegate's callback:

//convert sampleBuffer (32BGRA, as configured above) to cv::Mat
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
unsigned char *baseBuffer = (unsigned char *)CVPixelBufferGetBaseAddress(imageBuffer);

cv::Mat cvImage = cv::Mat((int)height, (int)width, CV_8UC3);

cv::MatIterator_<cv::Vec3b> it_start = cvImage.begin<cv::Vec3b>();
cv::MatIterator_<cv::Vec3b> it_end = cvImage.end<cv::Vec3b>();
long cur = 0;
// Rows may be padded: bytesPerRow can exceed width * 4.
size_t padding = CVPixelBufferGetBytesPerRow(imageBuffer) - width*4;
size_t offset = 0; // the original started at `padding`, which skewed every pixel by one row's padding
while (it_start != it_end) {
    // copy one BGRA pixel, dropping the alpha channel
    long p_idx = cur*4 + offset;
    unsigned char b = baseBuffer[p_idx];
    unsigned char g = baseBuffer[p_idx + 1];
    unsigned char r = baseBuffer[p_idx + 2];
    cv::Vec3b newpixel(b,g,r);
    *it_start = newpixel;
    cur++;
    it_start++;
    if (cur%width == 0) {
        // end of a row: skip the row padding
        offset = offset + padding;
    }
}
CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
//process cvImage now
//(Alternatively, wrapping the locked buffer in a CV_8UC4 Mat with the row stride
//and calling cv::cvtColor(..., cv::COLOR_BGRA2BGR) avoids the manual loop.)

Here is the solution. Thanks to Tim Bull's answer, I accomplished this using AVAssetReader / AVAssetReaderTrackOutput.

I called the following function from a button click to start the video and begin processing each frame with OpenCV:

func processVids() {
    guard let pathOfOrigVid = Bundle.main.path(forResource: "output_10_34_34", ofType: "mp4") else {
        print("output_10_34_34.mp4 not found\n")
        exit(0)
    }

    // Destination for the processed video, kept for the later writing step.
    var path: URL? = nil
    do {
        path = try FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: false)
        path = path?.appendingPathComponent("grayVideo.mp4")
    } catch {
        print("Unable to make URL to Movies path\n")
        exit(0)
    }

    let movie = AVURLAsset(url: URL(fileURLWithPath: pathOfOrigVid), options: nil)
    let track = movie.tracks(withMediaType: .video)[0]
    var reader: AVAssetReader? = nil
    do {
        reader = try AVAssetReader(asset: movie)
    } catch {
        print("Problem initializing AVReader\n")
    }

    let settings: [String: Any] = [
        kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
        kCVPixelBufferIOSurfacePropertiesKey as String: [String: Any]()
    ]

    let rout = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
    reader?.add(rout)
    reader?.startReading()

    DispatchQueue.global().async(execute: {
        while reader?.status == .reading {
            // Copy each buffer exactly once; the original code called
            // copyNextSampleBuffer() twice per pass, which force-unwrapped
            // a possibly nil result and silently dropped every other frame.
            if let sbuff = rout.copyNextSampleBuffer() {
                // Buffer of the frame to perform OpenCV processing on
                _ = sbuff
            }
            usleep(10000)
        }
    })
}
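For quick visual verification of a decoded frame before wiring up OpenCV, a buffer can be rendered to a UIImage with standard Core Image calls; this helper is an illustrative addition, not part of the original answer:

import AVFoundation
import CoreImage
import UIKit

func image(from sampleBuffer: CMSampleBuffer) -> UIImage? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    // Render the CIImage into a CGImage so UIKit can display it.
    guard let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}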

Hi Tiko, thanks for your input. In the end I got it working with AVAssetReader.
Thanks Tim! I ended up getting it working with AVAssetReader.
Hi @NFarrell, it would be very helpful if you could upload your solution; I'm having some trouble getting it to work from Apple's AVAssetReader documentation.
Hi @Vivian_.O, yes, I can dig up the solution and update my answer.
That was a long time ago, so I'm not sure it still exists, though I did come up with a solution, @Vivian_.O. Hope it helps!