Python: how to find the confidence of each class in YOLO Darknet

Tags: python, opencv, deep-learning, yolo, darknet

I developed my custom object detector with tiny YOLO and Darknet. It works well, but I need one specific feature: the network outputs bounding boxes, each represented by a vector of (number of classes + 5) elements. The first 4 elements are center x, center y, width and height. The fifth element is the confidence that the bounding box encloses an object. The remaining elements are the confidences associated with each class (i.e. object type). For each box I need the confidence associated with each class, but in the output I only get the maximum confidence; the other class confidences are 0.
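For reference, a single detection row can be unpacked like this (a minimal sketch with made-up numbers; only the layout is taken from the description above):

import numpy as np

# One output row for a 3-class model: [cx, cy, w, h, objectness, c0, c1, c2]
detection = np.array([0.48, 0.51, 0.10, 0.20, 0.91, 0.0, 0.0, 0.5874982])

box = detection[:4]           # center x, center y, width, height (all normalized)
objectness = detection[4]     # confidence that the box contains an object
class_scores = detection[5:]  # one confidence per class
best_class = int(np.argmax(class_scores))   # here: 2, i.e. the third class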

Example run:

print(scores)

returns 0.5874982 as the maximum confidence, and it is for the third class. But I don't understand, because the confidences of the other classes are 0. Thanks for any reply, and I'm sorry for my bad English. This is the code:

import cv2 as cv
import argparse
import sys
import numpy as np
import os.path

confThreshold = 0.5
nmsThreshold = 0.6
inpWidth = 416          #Width of network's input image
inpHeight = 416         #Height of network's input image

parser = argparse.ArgumentParser(description='Object Detection using YOLO in OPENCV')
parser.add_argument('--image', help='Path to image file.')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Load names of classes
classesFile = "obj.names"
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')

# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = "yolov3-tiny-obj.cfg"
modelWeights = "pesi/pesi_3_classi_new/yolov3-tiny-obj_7050.weights"

net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

# Get the names of the output layers
def getOutputsNames(net):
    layersNames = net.getLayerNames()
    return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom):
    if classId == 1:
        cv.rectangle(frame, (left, top), (right, bottom), (3, 14, 186), 3)
    elif classId == 0:
        cv.rectangle(frame, (left, top), (right, bottom), (40, 198, 31), 3)
    elif classId == 2:
        cv.rectangle(frame, (left, top), (right, bottom), (40, 198, 31), 3)

    label = '%.2f' % conf

    # Get the label for the class name and its confidence
    if classes:
        assert(classId < len(classes))
        label = '%s:%s' % (classes[classId], label)

    # Display the label at the top of the bounding box
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])
    cv.rectangle(frame, (left, top - round(1*labelSize[1])), (left + round(1*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 0), 1)

# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]

    # Scan through all the bounding boxes output from the network and keep only the
    # ones with high confidence scores. Assign the box's class label as the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                print(scores)
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non maximum suppression to eliminate redundant overlapping boxes with
    # lower confidences.
    indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    for i in indices:
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        drawPred(classIds[i], confidences[i], left, top, left + width, top + height)

# Process inputs
winName = 'Deep learning object detection in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)

outputFile = "yolo_out_py.avi"
if (args.image):
    # Open the image file
    if not os.path.isfile(args.image):
        print("Input image file ", args.image, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.image)
    outputFile = args.image[:-4] + '_yolo_out_py.jpg'
elif (args.video):
    if not os.path.isfile(args.video):
        print("Input video file ", args.video, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.video)
    outputFile = args.video[:-4] + '_yolo_out_py.avi'
else:
    cap = cv.VideoCapture(0)

if (not args.image):
    vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M','J','P','G'), 5,
        (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

while cv.waitKey(1) < 0:

    hasFrame, frame = cap.read()

    if not hasFrame:
        print("Done processing !!!")
        print("Output file is stored as ", outputFile)
        cv.waitKey(3000)
        # Release device
        cap.release()
        break

    # Create a 4D blob from a frame.
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)

    # Sets the input to the network
    net.setInput(blob)

    # Runs the forward pass to get output of the output layers
    outs = net.forward(getOutputsNames(net))

    # Remove the bounding boxes with low confidence
    postprocess(frame, outs)

    # Put efficiency information. The function getPerfProfile returns the overall time
    # for inference (t) and the timings for each of the layers (in layersTimes).
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

    if (args.image):
        cv.imwrite(outputFile, frame.astype(np.uint8))
    else:
        vid_writer.write(frame.astype(np.uint8))

    cv.imshow(winName, frame)

This may be because of the independent logistic classifiers. This description might help you understand:

Class prediction: YOLOv3 uses independent logistic classifiers for each class instead of a regular softmax layer. This is done to make the classification multi-label. What does that mean and how does it add value? Take an example: a picture shows a woman, and the model was trained on both "person" and "woman". With softmax the class probabilities are divided between the two classes, say with probabilities 0.4 and 0.45. Independent classifiers solve this problem: they give a yes/no probability for each class, e.g. the probability that there is a woman in the picture is 0.8 and the probability that there is a person is 0.9, so we can label the object as both a person and a woman.
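To make the difference concrete, here is a minimal sketch contrasting the two schemes (the logits are made-up values; softmax and sigmoid are the standard formulas, not code from the model):

import numpy as np

logits = np.array([2.2, 1.9, -1.0])   # raw scores for [person, woman, car]

# Softmax: scores compete and must sum to 1, so related classes
# split the probability mass between them.
softmax = np.exp(logits) / np.exp(logits).sum()   # ~[0.56, 0.42, 0.02]

# Independent logistic (sigmoid) classifiers, as in YOLOv3: each class
# gets its own yes/no probability, so several can be high at once.
sigmoid = 1.0 / (1.0 + np.exp(-logits))           # ~[0.90, 0.87, 0.27]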


Yes, and maybe not; it means that the result you get is the exact data of all the predictions. I tried the default YOLOv3 and it outputs results like this. For more information about the classification, read this.
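If the goal is to record every class confidence for each kept box rather than only the argmax, the inner loop of postprocess above could be extended like this (a sketch reusing the scores, classes and confThreshold names from the question's code):

scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
    # Print the confidence of every class for this box, not just the maximum.
    for idx, score in enumerate(scores):
        name = classes[idx] if classes else str(idx)
        print('%s: %.7f' % (name, score))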