变量显示为nil-swift 4 IOS

变量显示为 nil — Swift 4 / iOS。出于某种原因,我的变量 stringy 和 stringie 在控制台上打印得很好,但是当我尝试将它们设置到标签上时,它们显示为 nil。我的目标是将字符串和浮点值显示在应用的视图控制器上,但这不起作用。我认为问题与 viewDidLoad 有关,它似乎"隐藏"了全局变量;但如果我试图在 viewDidLoad 之外设置标签,则会出现编译错误。

出于某种原因,我的变量stringy和stringie在控制台上打印得很好,但是当我尝试将它们设置到标签上时,它们显示为nil

我的目标是将字符串和浮点值打印到应用程序视图控制器,但这不起作用

我认为它与viewdidload有关,好像它隐藏了全局变量。但是,如果我试图在viewdidload之外设置标签,则会出现声明错误

//  ViewController.swift
//  Intellicam
//

import UIKit
import AVKit
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    // Most recent classification produced by the Vision request.
    // Both start out nil and are only populated once the model has
    // processed at least one camera frame.
    var stringy:String!
    var stringie:Float!

    /// Configures the camera capture session, attaches a preview layer,
    /// and registers this controller as the sample-buffer delegate.
    override func viewDidLoad() {
        super.viewDidLoad()

        // Here we start the camera.
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame

        // Frames are delivered on a private background queue; see
        // captureOutput(_:didOutput:from:) below.
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)

        // Bug fix: do NOT read stringy/stringie here. viewDidLoad runs long
        // before the first frame has been classified, so both values are
        // still nil and the label would literally show "nil". The label is
        // instead updated from the Vision completion handler below, once
        // real values exist.
        self.Labele.text = "Analyzing..."
    }

    /// Called for every captured frame on the background "videoQueue".
    /// Runs the Core ML classifier and pushes the top result to the UI.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

        guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in

            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = results.first else { return }

            self.stringy = firstObservation.identifier
            self.stringie = firstObservation.confidence

            // Bug fix: update the label here, where the values actually
            // exist. UIKit must only be touched from the main thread, and
            // this completion handler runs on the background queue, so hop
            // to the main queue for the UI write.
            DispatchQueue.main.async {
                self.Labele.text = "Guess: \(firstObservation.identifier) + Certainty: \(firstObservation.confidence)"
            }
        }

        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }

    // NOTE(review): outlet keeps its original non-conventional capitalized
    // name because renaming it would break the storyboard connection.
    @IBOutlet weak var Labele: UILabel!


}
//  ViewController.swift
//  Intellicam
//

import UIKit
import AVKit
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    var stringy:String!
    var stringie:Float!

    override func viewDidLoad() {
        super.viewDidLoad()

        //here we start the camera
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
        captureSession.addInput(input)
        captureSession.startRunning()

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame

        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)

        self.Labele.text = "Guess: \(stringy) + Certainty: \(stringie)"
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}

        guard let model = try? VNCoreMLModel(for: Resnet50().model) else {return}
        let request = VNCoreMLRequest(model: model){
            (finishedReq, err) in

            guard let results = finishedReq.results as? [VNClassificationObservation] else {return}
            guard let firstObservastion = results.first else {return}

            self.stringy = firstObservastion.identifier
            self.stringie = firstObservastion.confidence

            print(self.stringy)
            print(self.stringie)
        }

        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }

    @IBOutlet weak var Labele: UILabel!
}

第一件事是,在确认这些值确实存在之前,不要对它们强制解包。在你的情况下,VNCoreMLRequest 可能会失败,这时你的两个变量都未被赋值,强制解包会直接让应用崩溃。另外,请确保对标签使用正确的命名约定。

你的问题在于:你并没有在得到结果之后再去设置标签的值。

要解决这个问题

 // Assigning a new classification result to this property automatically
 // pushes it to the label: didSet fires after every assignment, and the
 // UI write is hopped onto the main queue because the property is set
 // from the background video queue.
 var stringy:String? {
    didSet {
        // NOTE(review): assumes the Labele outlet is already connected
        // when the first assignment happens — otherwise the implicitly
        // unwrapped outlet would crash. Confirm against view lifecycle.
        DispatchQueue.main.async {
            self.Labele.text = self.stringy
        }
    }
}


您的请求是异步的。您需要把设置标签的逻辑移到请求的完成回调(completion handler)内,或者为 captureOutput 方法添加完成回调并在那里处理。这应该会有所帮助——在确定 stringy 和 stringie 不是 nil 之后,再设置标签的值即可。您可以在 captureOutput 函数中直接执行此操作,但由于这是 UI 更新,必须在主线程中执行:DispatchQueue.main.async { self.Labele.text = "Guess: \(stringy) + Certainty: \(stringie)" }
        // Store the latest observation, then update the label on the main
        // queue — UIKit must only be touched from the main thread, and this
        // code runs in the Vision completion handler on a background queue.
        self.stringy = firstObservastion.identifier
        self.stringie = firstObservastion.confidence
        DispatchQueue.main.async {
             // NOTE(review): this snippet reads `stringy`/`stringie` without
             // `self.` inside an escaping closure — in real code this needs
             // `self.stringy`/`self.stringie` (or an explicit capture) to
             // compile. Verify before copying verbatim.
             self.Labele.text = "Guess: \(stringy) + Certainty: \(stringie)"
         }