Swift 4: subclassing CIFilter crashes, but only with "input" instance variables

Tags: swift, core-image, swift4, cifilter

How do you subclass CIFilter now? In Swift 3 I could do a simple:

class CustomFilter: CIFilter {
   var inputImage: CIImage?
   var inputOrigin: CIVector?
   var inputAnotherVar: String?
}
But in Swift 4 I get an NSException. If I remove "input" from each variable name, it works fine. I could just do that, but I feel like I'm missing something important, and I can't seem to find anything that explains this behavior.

This compiles fine in Swift 4:

class CustomFilter: CIFilter {
   var image: CIImage?
   var origin: CIVector?
   var anotherVar: String?
}
Here is the error from the playground (the screenshot is not reproduced here; execution is interrupted with an EXC_BAD_INSTRUCTION):


Per the comments, here is some Swift 4 code (identical in Swift 3) that both builds and executes. I don't know where your issue is, so if this doesn't help, comment and I'll delete it. (If it does help, I'll edit my answer to be more specific!)

The first CIFilter uses CIColorInvert and CIHeightFieldFromMask to create a "text mask" from the text of a UILabel. It also overrides the CIFilter outputImage property. The second CIFilter is really a "wrapper" around a CIKernel, taking a CIImage as inputImage and the mask (from the first filter) as inputMask, and it overrides outputImage just like the first.

Almost all of this code is taken from Simon Gladman's Core Image for Swift, which is now available for free as an iBook. Although the book was written in Swift 2, I've found it an invaluable resource for working with Core Image.

(Side note: the book combines all of this into a single filter. I split it apart while trying to apply it as a watermark in an existing app. I ended up going a different route!)

Mask.swift

import UIKit
import CoreImage

// Builds an inverted height-field "text mask" from the text of a UILabel.
public class Mask: CIFilter {
    public var inputExtent:CGRect?
    var inputRadius: Float = 15 {
        didSet {
            if oldValue != inputRadius {
                refractingImage = nil
            }
        }
    }
    private var refractingImage: CIImage?
    private var rawTextImage: CIImage?

    override public var outputImage: CIImage! {
        if refractingImage == nil {
            generateRefractingImage()
        }
        let mask = refractingImage?.applyingFilter("CIColorInvert", parameters: [:])
        return mask
    }

    func generateRefractingImage() {
        let label = UILabel(frame: inputExtent!)
        label.text = "grand canyon"
        label.font = UIFont.boldSystemFont(ofSize: 300)
        label.adjustsFontSizeToFitWidth = true
        label.textColor = UIColor.white

        UIGraphicsBeginImageContextWithOptions(
            CGSize(width: label.frame.width,
                   height: label.frame.height), true, 1)
        label.layer.render(in: UIGraphicsGetCurrentContext()!)
        let textImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()

        rawTextImage = CIImage(image: textImage!)!
        refractingImage = CIFilter(name: "CIHeightFieldFromMask",
                                   withInputParameters: [
                                    kCIInputRadiusKey: inputRadius,
                                    kCIInputImageKey: rawTextImage!])?.outputImage?
            .cropped(to: inputExtent!)
    }
}
Refraction.swift

import CoreImage

// Wraps a custom CIKernel that refracts inputImage using the luma of inputMask.
public class Refraction: CIFilter {
    public var inputImage: CIImage?
    public var inputMask:CIImage?

    var inputRefractiveIndex: Float = 4.0
    var inputLensScale: Float = 50
    public var inputLightingAmount: Float = 1.5

    var inputLensBlur: CGFloat = 0
    public var inputBackgroundBlur: CGFloat = 2

    var inputRadius: Float = 15

    override public func setDefaults()
    {
        inputRefractiveIndex = 4.0
        inputLensScale = 50
        inputLightingAmount = 1.5
        inputRadius = 15
        inputLensBlur = 0
        inputBackgroundBlur = 2
    }

    override public var outputImage: CIImage! {
        guard let inputImage = inputImage, let refractingKernel = refractingKernel else {
            return nil
        }

        let extent = inputImage.extent
        let arguments = [inputImage,
                         inputMask!,
                         inputRefractiveIndex,
                         inputLensScale,
                         inputLightingAmount] as [Any]
        return refractingKernel.apply(extent: extent,
                                      roiCallback: {
                                        (index, rect) in
                                        return rect
        },
                                      arguments: arguments)!
    }

    let refractingKernel = CIKernel(source:
        "float lumaAtOffset(sampler source, vec2 origin, vec2 offset)" +
            "{" +
            " vec3 pixel = sample(source, samplerTransform(source, origin + offset)).rgb;" +
            " float luma = dot(pixel, vec3(0.2126, 0.7152, 0.0722));" +
            " return luma;" +
            "}" +


            "kernel vec4 lumaBasedRefract(sampler image, sampler refractingImage, float refractiveIndex, float lensScale, float lightingAmount) \n" +
            "{ " +
            " vec2 d = destCoord();" +

            " float northLuma = lumaAtOffset(refractingImage, d, vec2(0.0, -1.0));" +
            " float southLuma = lumaAtOffset(refractingImage, d, vec2(0.0, 1.0));" +
            " float westLuma = lumaAtOffset(refractingImage, d, vec2(-1.0, 0.0));" +
            " float eastLuma = lumaAtOffset(refractingImage, d, vec2(1.0, 0.0));" +

            " vec3 lensNormal = normalize(vec3((eastLuma - westLuma), (southLuma - northLuma), 1.0));" +

            " vec3 refractVector = refract(vec3(0.0, 0.0, 1.0), lensNormal, refractiveIndex) * lensScale; " +

            " vec3 outputPixel = sample(image, samplerTransform(image, d + refractVector.xy)).rgb;" +

            " outputPixel += (northLuma - southLuma) * lightingAmount ;" +
            " outputPixel += (eastLuma - westLuma) * lightingAmount ;" +

            " return vec4(outputPixel, 1.0);" +
        "}"
    )
}
Usage

let filterMask = Mask()
let filter = Refraction()
var imgOriginal:CIImage!
var imgMask:CIImage!
var imgEdited:CIImage!

// I have a set of sliders that update a tuple and send an action that executes the following code

filterMask.inputRadius = sliders.valuePCP.3
imgMask = filterMask.outputImage
filter.inputMask = imgMask
filter.inputRefractiveIndex = sliders.valuePCP.0
filter.inputLensScale = sliders.valuePCP.1
filter.inputLightingAmount = sliders.valuePCP.2
imgEdited = filter.outputImage
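
The snippet above assumes filterMask.inputExtent and filter.inputImage are set elsewhere (the filters force-unwrap or guard on them), and the edited CIImage still has to be rendered. A minimal rendering sketch, not part of the original answer and assuming a hypothetical UIImageView named imageView as the destination:

// Rendering sketch (assumption): a CIImage is only a recipe, so it has to be
// drawn through a CIContext to get actual pixels back.
let context = CIContext()   // expensive to create; reuse it across renders

if let edited = imgEdited,
   let cgImage = context.createCGImage(edited, from: edited.extent) {
    imageView.image = UIImage(cgImage: cgImage)   // imageView is hypothetical
}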

Hope this helps.

I ran into this issue in Swift 4 while experimenting with Simon Gladman's Core Image for Swift (the same "error: Execution was interrupted, reason: EXC_BAD_INSTRUCTION..."). I also tried running the sample code in an app rather than in a playground. My solution was to add @objc dynamic in front of var inputImage: CIImage?, which in your code would look like this:

class CustomFilter: CIFilter {
    @objc dynamic var inputImage: CIImage?
    var inputOrigin: CIVector?
    var inputAnotherVar: String?
}
As far as I understand, this happens because Swift 4 minimizes @objc inference by default to reduce binary size, whereas Swift 3 inferred @objc for such properties implicitly. In practice this meant I had to add @objc dynamic to certain variables that rely on Objective-C's dynamic dispatch, for example when configuring a Core Image filter with filter.setValue(inputImage, forKey: kCIInputImageKey). Here are some resources that describe similar problems and how to deal with dispatch when you run into them.
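
To make the connection concrete, here is a minimal sketch (my own illustration, not code from this thread; TintFilter and its properties are made up) of a CIFilter subclass whose properties are driven through key-value coding. Without @objc dynamic on the properties, the setValue(_:forKey:) calls below are what raise the exception described in the question under Swift 4:

import UIKit
import CoreImage

class TintFilter: CIFilter {            // hypothetical filter, for illustration only
    // Exposed to the Objective-C runtime so KVC (setValue/value(forKey:)) can reach them.
    @objc dynamic var inputImage: CIImage?
    @objc dynamic var inputIntensity: NSNumber = 1.0

    override var outputImage: CIImage? {
        // Pass-through for brevity; a real filter would apply a kernel here.
        return inputImage
    }
}

let filter = TintFilter()
let source = CIImage(color: .red).cropped(to: CGRect(x: 0, y: 0, width: 64, height: 64))

// Both calls go through Objective-C KVC, which is why @objc dynamic is needed in Swift 4.
filter.setValue(source, forKey: kCIInputImageKey)
filter.setValue(0.5, forKey: "inputIntensity")
let output = filter.value(forKey: "outputImage") as? CIImage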

It looks to me like you're missing some code. Why is inputImage a CIVector rather than some kind of image (presumably a CIImage)? If that's not the issue, maybe you could post more of the CustomFilter code?

Thanks for taking a look! You're right, that name was misleading, so I've re-edited the question to hopefully make my point clearer.

I may still be a bit confused, but I use CIFilter/CIKernel in three different ways, and only one of them requires subclassing CIFilter. I checked my code (Swift 3 and 4), e.g. public var inputImage: CIImage?, and both versions work fine for me. Other than declaring my classes as public (they are part of a framework target), I can't see why there would be a problem. Are you getting a build error?

A runtime error.
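
For context on the non-subclassing routes mentioned above, a minimal sketch (my own illustration, not from the thread) of driving a built-in filter purely through CIFilter(name:) and KVC, with no custom subclass involved, would look like this:

import UIKit
import CoreImage

// A built-in filter driven via KVC: no CIFilter subclass is involved,
// so the Swift 4 @objc inference change never comes into play.
let input = CIImage(color: .blue).cropped(to: CGRect(x: 0, y: 0, width: 64, height: 64))

if let blur = CIFilter(name: "CIGaussianBlur") {
    blur.setValue(input, forKey: kCIInputImageKey)
    blur.setValue(4.0, forKey: kCIInputRadiusKey)
    let blurred = blur.outputImage   // still a CIImage?; render it through a CIContext
}

The same thing can also be written as input.applyingFilter("CIGaussianBlur", parameters: [kCIInputRadiusKey: 4.0]), the convenience the Mask filter above already uses for CIColorInvert.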