
Python OpenCV2: measuring the distance between objects in an image


I have a program that measures the distance between two objects in an image. However, the program automatically takes the left-most object as the "reference object." I have been trying to change this reference object to the square at the center of the image instead.

The only way I can think of to do this is to use Python Pillow to compute the image's center coordinates and use them to build a "square" reference object, but I have been struggling to make that work. I have included the unchanged code below; one possible approach is sketched after the listing.

Any help would be greatly appreciated.

# USAGE
# python distance_between.py --image images/example_01.png --width 0.955
# python distance_between.py --image images/example_02.png --width 0.955
# python distance_between.py --image images/example_03.png --width 3.5
# python distance_between.py --image images/example_04.jpg --width 1.0

# import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the input image")
ap.add_argument("-w", "--width", type=float, required=True,
    help="width of the left-most object in the image (in inches)")
args = vars(ap.parse_args())

# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# sort the contours from left-to-right, then initialize the
# distance colors and reference object
(cnts, _) = contours.sort_contours(cnts)
colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0),
    (255, 0, 255))
refObj = None

# loop over the contours individually
for c in cnts:
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 100:
        continue

    # compute the rotated bounding box of the contour
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")

    # order the points in the contour such that they appear
    # in top-left, top-right, bottom-right, and bottom-left
    # order, then draw the outline of the rotated bounding
    # box
    box = perspective.order_points(box)

    # compute the center of the bounding box
    cX = np.average(box[:, 0])
    cY = np.average(box[:, 1])

    # if this is the first contour we are examining (i.e.,
    # the left-most contour), we presume this is the
    # reference object
    if refObj is None:
        # unpack the ordered bounding box, then compute the
        # midpoint between the top-left and top-right points,
        # followed by the midpoint between the top-right and
        # bottom-right
        (tl, tr, br, bl) = box
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # compute the Euclidean distance between the midpoints,
        # then construct the reference object
        D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        refObj = (box, (cX, cY), D / args["width"])
        continue

    # draw the contours on the image
    orig = image.copy()
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
    cv2.drawContours(orig, [refObj[0].astype("int")], -1, (0, 255, 0), 2)

    # stack the reference coordinates and the object coordinates
    # to include the object center
    refCoords = np.vstack([refObj[0], refObj[1]])
    objCoords = np.vstack([box, (cX, cY)])
       
    # loop over the original points
    for ((xA, yA), (xB, yB), color) in zip(refCoords, objCoords, colors):
        # draw circles corresponding to the current points and
        # connect them with a line
        cv2.circle(orig, (int(xA), int(yA)), 5, color, -1)
        cv2.circle(orig, (int(xB), int(yB)), 5, color, -1)
        cv2.line(orig, (int(xA), int(yA)), (int(xB), int(yB)),
            color, 2)
        # compute the Euclidean distance between the coordinates,
        # and then convert the distance in pixels to distance in
        # units
        D = dist.euclidean((xA, yA), (xB, yB)) / refObj[2]
        (mX, mY) = midpoint((xA, yA), (xB, yB))
        cv2.putText(orig, "{:.1f}in".format(D), (int(mX), int(mY - 10)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.55, color, 2)

        # show the output image
        cv2.imshow("Image", orig)
        cv2.waitKey(0)
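
Not part of the question's listing, but a minimal sketch of the idea described above, assuming OpenCV 3+ (so cv2.boxPoints is available) and reusing only calls that already appear in the script. Rather than treating the first (left-most) contour as the reference, it picks the contour whose rotated bounding-box center lies closest to the image center, which should select the center square. The helper name build_center_reference, its parameters, and its return layout are illustrative and not part of the original program.

# sketch: choose the contour closest to the image center as the reference
import cv2
import numpy as np
from scipy.spatial import distance as dist
from imutils import perspective

def build_center_reference(cnts, image_shape, known_width, min_area=100):
    # image center in pixel coordinates
    (h, w) = image_shape[:2]
    imgCenter = (w / 2.0, h / 2.0)

    refIdx = refBox = refCenter = bestDist = None
    for (i, c) in enumerate(cnts):
        # ignore small contours, as in the main script
        if cv2.contourArea(c) < min_area:
            continue

        # rotated bounding box and its center, computed the same way
        # as in the main loop
        box = cv2.boxPoints(cv2.minAreaRect(c))
        box = perspective.order_points(np.array(box, dtype="int"))
        (cX, cY) = (np.average(box[:, 0]), np.average(box[:, 1]))

        # keep the contour whose center is nearest the image center
        d = dist.euclidean((cX, cY), imgCenter)
        if bestDist is None or d < bestDist:
            (refIdx, refBox, refCenter, bestDist) = (i, box, (cX, cY), d)

    # no sufficiently large contour was found
    if refIdx is None:
        return None

    # pixels-per-unit ratio, computed exactly as for the left-most
    # reference in the original script
    (tl, tr, br, bl) = refBox
    (tlblX, tlblY) = ((tl[0] + bl[0]) * 0.5, (tl[1] + bl[1]) * 0.5)
    (trbrX, trbrY) = ((tr[0] + br[0]) * 0.5, (tr[1] + br[1]) * 0.5)
    D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
    return (refBox, refCenter, D / known_width, refIdx)

With a helper like this, the reference could be built once, right after contours.sort_contours, e.g. ref = build_center_reference(cnts, image.shape, args["width"]), and the main loop would skip that contour (by enumerating cnts and comparing each index to the returned refIdx) instead of using the "if refObj is None" branch; the measuring and drawing code afterwards stays unchanged.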