Python 3.x: Check for the presence of bounding boxes only in certain regions of an image/video frame


I am trying to detect cars in a certain region of a live video stream. For this I am using TensorFlow's Object Detection API. The detection works fairly well: almost every car in the live stream is detected as a car, with a bounding box and a detection confidence score in percent.

My question is: how do I check for the presence of detection bounding boxes only within a desired region?

For example, since both the desired region and the camera used for detection are fixed in place, I use OpenCV's cv2.rectangle function and pass it the x1, y1 and x2, y2 coordinates of the desired region, so a constant rectangle is drawn around that region. My task is to somehow know that a car has entered this marked rectangular region, by printing a "detected" log message to the Ubuntu terminal.

I am having a hard time comparing the bounding box coordinates with the rectangle's coordinates. Hence the questions:

How do I grab only the desired bounding boxes of the detected cars?
How do I detect when those bounding boxes lie within the rectangular/marked region?

This is the code I am using:

import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from PIL import Image

import cv2
cap = cv2.VideoCapture(0)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

if tf.__version__ != '1.10.1':
  raise ImportError('Please upgrade your tensorflow installation to v1.10.1* or later!')


# ## Env setup

# In[3]:

# ## Object detection imports
# Here are the imports from the object detection module.

# In[5]:

from utils import label_map_util
from utils import visualization_utils as vis_util

# # Model preparation 

# ## Variables

# Any model exported using the `export_inference_graph.py` tool can be
# loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a
# new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the
# [detection model zoo]
# In[6]:
# What model to download.
MODEL_NAME = 'car_inference_graph'

# Path to frozen detection graph. This is the actual model that is
# used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')

NUM_CLASSES = 1

# ## Load a (frozen) Tensorflow model into memory.

# In[7]:
detection_graph = tf.Graph()
with detection_graph.as_default():
  od_graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
    serialized_graph = fid.read()
    od_graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(od_graph_def, name='')

# ## Loading label map
# Label maps map indices to category names, so that when our
# convolution network predicts `5`, we know that this corresponds to
# `airplane`. Here we use internal utility functions, but anything that
# returns a dictionary mapping integers to appropriate string labels
# would be fine

# In[8]:

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# ## Helper code

# In[9]:

def load_image_into_numpy_array(image):
  (im_width, im_height) = image.size
  return np.array(image.getdata()).reshape(
      (im_height, im_width, 3)).astype(np.uint8)

# # Detection

def run_inference_for_single_image(image, graph):
  with graph.as_default():
    with tf.Session() as sess:
      # Get handles to input and output tensors
      ops = tf.get_default_graph().get_operations()
      all_tensor_names = {output.name for op in ops for output in op.outputs}
      tensor_dict = {}
      for key in [
          'num_detections', 'detection_boxes', 'detection_scores',
          'detection_classes', 'detection_masks'
      ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
          tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
              tensor_name)
      if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates
        # to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0],
                                   [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                   [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
      image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

      # Run inference
      output_dict = sess.run(tensor_dict,
                             feed_dict={image_tensor: np.expand_dims(image, 0)})

      # all outputs are float32 numpy arrays, so convert types as appropriate
      output_dict['num_detections'] = int(output_dict['num_detections'][0])
      output_dict['detection_classes'] = output_dict[
          'detection_classes'][0].astype(np.uint8)
      output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
      output_dict['detection_scores'] = output_dict['detection_scores'][0]
      if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
  return output_dict

with detection_graph.as_default():
  with tf.Session(graph=detection_graph) as sess:
    while True:
      ret, image_np = cap.read()

      # Expand dimensions since the model expects images to have shape:
      # [1, None, None, 3]
      image_np_expanded = np.expand_dims(image_np, axis=0)
      image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
      # Each box represents a part of the image where a particular
      # object was detected.
      boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
      # Each score represents the level of confidence for each of the objects.
      # Score is shown on the result image, together with the class label.
      scores = detection_graph.get_tensor_by_name('detection_scores:0')
      classes = detection_graph.get_tensor_by_name('detection_classes:0')
      num_detections = detection_graph.get_tensor_by_name('num_detections:0')
      # Actual detection.
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={image_tensor: image_np_expanded})
      # Visualization of the results of a detection.
      vis_util.visualize_boxes_and_labels_on_image_array(
          image_np,
          np.squeeze(boxes),
          np.squeeze(classes).astype(np.int32),
          np.squeeze(scores),
          category_index,
          use_normalized_coordinates=True,
          line_thickness=8)

      area1 = cv2.rectangle(image_np, (201,267), (355,476), (0,255,0), 2)
      area2 = cv2.rectangle(image_np, (354,271), (562,454), (255,0,0), 2)
      cv2.imshow("object detection", image_np)

      if 'detection_boxes:0' == 1 in area1[(201,267),(353,468)]:
        print("area1 occupied!")
      else:
        print("area1 free!")

      if 'detection_boxes:1' == 1 in area2[(354,271),(562,454)]:
        print("area2 occupied!")
      else:
        print("area2 free!")

      if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        cap.release()
        break
I am finding it hard to work out a solution. Please help.

Technical details:

TensorFlow 1.10

OS - Ubuntu 18.04

Python 3.6

OpenCV 3.4.2


Thanks!

You can use Intersection over Union (IoU) for this. If the car lies within the desired marked rectangle, the IoU will have some nonzero value; otherwise it will be zero.
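A minimal sketch of such an IoU helper (plain Python, not from the original answer; boxes are assumed to be (x1, y1, x2, y2) tuples in pixel coordinates):

def iou(box_a, box_b):
  # Boxes are (x1, y1, x2, y2) in pixel coordinates.
  ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
  ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
  inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)  # intersection area
  if inter == 0:
    return 0.0
  area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
  area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
  return inter / float(area_a + area_b - inter)  # union = A + B - inter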

When the car's rectangle lies exactly on the marked rectangle, the IoU will be close to 1, and that is your solution.
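Applied inside the question's capture loop, right after sess.run(...), this could look roughly like the sketch below. Two assumptions to flag: detection_boxes come back as normalized (ymin, xmin, ymax, xmax) values and must be scaled by the frame size before comparing with the pixel coordinates of the marked areas, and the 0.5 confidence threshold is an arbitrary choice.

h, w = image_np.shape[:2]
area1_box = (201, 267, 355, 476)      # (x1, y1, x2, y2) of the marked region
occupied = False
for box, score in zip(np.squeeze(boxes), np.squeeze(scores)):
  if score < 0.5:                     # assumed confidence threshold
    continue
  ymin, xmin, ymax, xmax = box        # normalized coordinates
  car_box = (xmin * w, ymin * h, xmax * w, ymax * h)
  if iou(car_box, area1_box) > 0:     # any overlap counts as occupied
    occupied = True
    break
print("area1 occupied!" if occupied else "area1 free!")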