Python TensorFlow Object Detection API: how to improve the detection score of an image

Tags: python, tensorflow, ocr, data-extraction

While using the TensorFlow Object Detection API I found that the detection scores are low. I don't know how to improve the detection scores, and with such a low detection score I get an IndexError: list index out of range.

I need suggestions on how to get rid of this error.

import numpy as np
import tensorflow as tf
from PIL import Image
from object_detection.utils import ops as utils_ops

image_path = "C:/Users/Documents/pdf2txt/invoice.jpg"

def run_inference_for_single_image(image, graph):
  with graph.as_default():
    with tf.Session() as sess:
      # Get handles to input and output tensors
      ops = tf.get_default_graph().get_operations()
      all_tensor_names = {output.name for op in ops for output in op.outputs}
      tensor_dict = {}
      for key in [
          'num_detections', 'detection_boxes', 'detection_scores',
          'detection_classes', 'detection_masks'
      ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
          tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
              tensor_name)
      if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
      image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

      # Run inference
      output_dict = sess.run(tensor_dict,
                             feed_dict={image_tensor: np.expand_dims(image, 0)})

      # all outputs are float32 numpy arrays, so convert types as appropriate
      output_dict['num_detections'] = int(output_dict['num_detections'][0])
      output_dict['detection_classes'] = output_dict[
          'detection_classes'][0].astype(np.uint8)
      output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
      output_dict['detection_scores'] = output_dict['detection_scores'][0]
      print(output_dict['detection_scores'])
      if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
  return output_dict
for image_path in TEST_IMAGE_PATHS:
  image = Image.open(image_path)
  image_np = load_image_into_numpy_array(image)
  image_np_expanded = np.expand_dims(image_np, axis=0)
  output_dict = run_inference_for_single_image(image_np, detection_graph)
  outImage = Image.fromarray(image_np)



firstResult = output_dict['detection_boxes'][0]
firstArray = []

score = output_dict['detection_scores'][0]
print(score)
# if score > float(0.85):
for coords in firstResult:
  realCoord = coords*1024
  firstArray.append(realCoord)

  firstImage = image.crop((firstArray[1], firstArray[0],firstArray[3],firstArray[2]))

  outputClass = output_dict['detection_classes'][0]
  parameter =  CLASSES[outputClass - 1]
  coordText = str(firstArray[1]) + " " + str(firstArray[0]) + " " + str(firstArray[3]) + " " +str(firstArray[2]) + " " + parameter + 'xout1.tif'
  coordsFile.write(coordText + "\n")
  firstImage.save(r'C:/Users/neerajjha/Documents/pdf2txt/object_detection/Results/' + parameter + 'xout1.tif')
  print(coordsFile)
Output:

Traceback (most recent call last):
  File "c:/Users/Documents/pdf2txt/server_detection.py", line 260, in <module>
    firstImage = image.crop((firstArray[1], firstArray[0],firstArray[3],firstArray[2]))

IndexError: list index out of range

Please advise.

I think the problem is in this piece of code:

for coords in firstResult:
  realCoord = coords*1024
  firstArray.append(realCoord)

  firstImage = image.crop((firstArray[1], firstArray[0],firstArray[3],firstArray[2]))

firstResult should contain the 4 coordinates of the bounding box detected by the model. Could you try moving the last line out of the for loop, so that all 4 values have been appended to firstArray before they are used in the image.crop call?
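As a rough sketch of that change, keeping the question's variable names and its assumption that the input image is 1024x1024: the crop happens only after all four scaled coordinates have been collected.

firstResult = output_dict['detection_boxes'][0]  # [ymin, xmin, ymax, xmax], normalized to 0..1
firstArray = []

for coords in firstResult:
  realCoord = coords * 1024  # scale normalized coordinate to pixels (1024x1024 image assumed)
  firstArray.append(realCoord)

# Crop only once the list holds all four values; PIL expects (left, upper, right, lower)
firstImage = image.crop((firstArray[1], firstArray[0], firstArray[3], firstArray[2]))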

Please suggest how to fix this. In the code above I used an empty array; please refer to it.

I would suggest removing the first three lines (the for loop, realCoord and firstArray) and using the bounding box values directly:

firstImage = image.crop((firstResult[1]*1024, firstResult[0]*1024, firstResult[3]*1024, firstResult[2]*1024))

This assumes your input image is 1024x1024.
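If the low-score detections are themselves the issue, a minimal sketch that crops every box above a confidence threshold rather than only the first one may help. The 0.85 threshold, the CLASSES list and the Results folder are taken from the question; the 1024x1024 image size and the indexed output filenames are assumptions.

boxes = output_dict['detection_boxes']
scores = output_dict['detection_scores']
classes = output_dict['detection_classes']

for i, (box, score) in enumerate(zip(boxes, scores)):
  if score < 0.85:  # skip low-confidence detections
    continue
  ymin, xmin, ymax, xmax = box * 1024  # assuming a 1024x1024 input image
  crop = image.crop((xmin, ymin, xmax, ymax))
  parameter = CLASSES[classes[i] - 1]
  # index the filename so detections of the same class do not overwrite each other
  crop.save('C:/Users/neerajjha/Documents/pdf2txt/object_detection/Results/'
            + parameter + '_' + str(i) + '.tif')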