Python: is there any way to improve our model that uses OpenCV text detection (the EAST text detector) to detect street signs in images?


In our project we are trying to detect street signs using only the EAST text detector in Python. After some small changes we managed to get the code working, but our model is not very accurate. In most cases it produces more false positives than true positives. It also detects other text in some images, but we still need to train the model so that it only predicts street signs. Street signs in the Netherlands are usually white letters on a blue background.

In some images the model even predicts image fragments that are not text at all, such as part of a tree, a balcony, and so on.

My question: is there a way to improve this model so that it only predicts street signs?
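
One direction that might help, since Dutch street signs are white letters on a blue background, is to post-filter EAST's detections by colour: keep a box only if enough of its pixels fall in a blue HSV range. A minimal sketch (the helper name, the HSV bounds, and the 0.3 ratio are illustrative guesses that would need tuning, not values from the original code):

import cv2
import numpy as np

def looks_like_street_sign(image, box, min_blue_ratio=0.3):
    # crop the detection out of the (BGR) image
    (startX, startY, endX, endY) = box
    roi = image[max(startY, 0):endY, max(startX, 0):endX]
    if roi.size == 0:
        return False

    # mask the blue pixels in HSV space; these bounds are a rough
    # guess for Dutch street-sign blue and need tuning
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([100, 80, 40]), np.array([130, 255, 255]))

    # accept the box only if enough of it is blue
    blue_ratio = cv2.countNonZero(mask) / float(mask.size)
    return blue_ratio >= min_blue_ratio

A sketch of where this could slot into the loop follows the full code below.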

Link to the EAST text detection GitHub:

# import the necessary packages
from imutils.object_detection import non_max_suppression
import numpy as np
import pandas as pd
import argparse
import time
import cv2

# define the two output layer names for the EAST detector model that
# we are interested in -- the first gives the output probabilities and
# the second can be used to derive the bounding box coordinates of text
layerNames = [
    "feature_fusion/Conv_7/Sigmoid",
    "feature_fusion/concat_3"]

# load the pre-trained EAST text detector once, before the loop
net = cv2.dnn.readNet('frozen_east_text_detection.pb')

timing = []

for m in range(5704):
    # load the input image and grab the image dimensions
    image = cv2.imread('/ext/PM-track-data/PM_BAM-data/Breda-ring_22-04-2020_13-57-38/360/' + str(m) + '.jpeg')
    orig = image.copy()
    (H, W) = image.shape[:2]

    # set the new width and height (both must be multiples of 32 for
    # EAST), then determine the change ratio for both dimensions
    (newW, newH) = (4096, 2048)
    rW = W / float(newW)
    rH = H / float(newH)

    # resize the image and grab the new image dimensions
    image = cv2.resize(image, (newW, newH))
    (H, W) = image.shape[:2]

    # construct a blob from the image and then perform a forward pass of
    # the model to obtain the two output layer sets
    blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
        (123.68, 116.78, 103.94), swapRB=True, crop=False)
    start = time.time()
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    end = time.time()

    # record how long the text prediction took
    tijd = end - start
    timing.append(tijd)

    # grab the number of rows and columns from the scores volume, then
    # initialize our set of bounding box rectangles and corresponding
    # confidence scores
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []

    # loop over the number of rows
    for y in range(0, numRows):
        # extract the scores (probabilities), followed by the geometrical
        # data used to derive potential bounding box coordinates that
        # surround text
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]

        # loop over the number of columns
        for x in range(0, numCols):
            # if our score does not have sufficient probability, ignore it
            #if scoresData[x] < args["min_confidence"]:
            if scoresData[x] < 0.3:
                continue

            # compute the offset factor as our resulting feature maps will
            # be 4x smaller than the input image
            (offsetX, offsetY) = (x * 4.0, y * 4.0)

            # extract the rotation angle for the prediction and then
            # compute the sin and cosine
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)

            # use the geometry volume to derive the width and height of
            # the bounding box
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]

            # compute both the starting and ending (x, y)-coordinates for
            # the text prediction bounding box
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)

            # add the bounding box coordinates and probability score to
            # our respective lists
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])

    # apply non-maxima suppression to suppress weak, overlapping bounding
    # boxes
    boxes = non_max_suppression(np.array(rects), probs=confidences)

    # loop over the bounding boxes
    for (startX, startY, endX, endY) in boxes:
        # scale the bounding box coordinates back to the original image
        # based on the respective ratios
        startX = int(startX * rW)
        startY = int(startY * rH)
        endX = int(endX * rW)
        endY = int(endY * rH)

        # draw the bounding box on the image
        cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)

    # write the annotated output image
    cv2.imwrite('file path' + str(m) + '.jpeg', orig)
    print('Foto: ' + str(m) + ' is klaar')
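
For what it's worth, a colour-based post-filter like the looks_like_street_sign helper sketched above (a hypothetical function, not part of the original code) could be wired in between the non-maxima suppression step and the drawing step, for example:

    # keep only the boxes whose rescaled region is mostly blue; the
    # looks_like_street_sign helper is the hypothetical filter above
    kept = []
    for (startX, startY, endX, endY) in boxes:
        scaled = (int(startX * rW), int(startY * rH),
                  int(endX * rW), int(endY * rH))
        if looks_like_street_sign(orig, scaled):
            kept.append(scaled)

    # draw only the boxes that survived the colour filter
    for (startX, startY, endX, endY) in kept:
        cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)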