
Python: Difficulty detecting the outer circle with cv2.HoughCircles

Tags: python, image, opencv, image-processing, computer-vision

I am trying to detect the outer boundary of the circular object in the image below:

I tried OpenCV's Hough Circle transform, but the code does not work on every image. I also tried adjusting Hough Circle parameters such as minRadius and maxRadius, but that does not work for all images either.

The goal is to detect the object in the image and crop it out.

Expected output:

Source code:

import imutils
import cv2
import numpy as np
from matplotlib import pyplot as plt


image = cv2.imread("path to the image i have provided")
r = 600.0 / image.shape[1]
dim = (600, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imwrite("path to were we want to save downscaled image", resized)


image = cv2.imread('path of downscaled image')
image1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image2 = cv2.GaussianBlur(image1, (5, 5), 0)
edged = cv2.Canny(image2, 30, 150)

img = cv2.medianBlur(image2,5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)

circles = cv2.HoughCircles(edged,cv2.HOUGH_GRADIENT,1,20,
                            param1=50,param2=30,minRadius=200,maxRadius=280)

circles = np.uint16(np.around(circles))

max_circle = max(circles[0,:], key=lambda x:x[2])
# print(max_circle)

# # Create mask
height,width = image1.shape
mask = np.zeros((height,width), np.uint8)


for i in [max_circle]:
    cv2.circle(mask,(i[0],i[1]),i[2],(255,255,255),thickness=-1)  


masked_data = cv2.bitwise_and(image, image, mask=mask)

_,thresh = cv2.threshold(mask,1,255,cv2.THRESH_BINARY)

# Find Contour
contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[0]
x,y,w,h = cv2.boundingRect(contours[0])

# Crop masked_data
crop = masked_data[y:y+h,x:x+w]

#Code to close Window
cv2.imshow('OG',image)
cv2.imshow('Cropped ROI',crop)
cv2.imwrite("path to save roi image", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
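
A note on the snippet above: cv2.HoughCircles returns None when no circle satisfies the given parameters, so the np.uint16(np.around(circles)) call will raise an error on exactly the images where detection fails. Below is a minimal sketch of a guard, reusing the edged and image1 variables defined above; the fallback radius bounds proportional to the image width are an assumption, not tuned values.

circles = cv2.HoughCircles(edged, cv2.HOUGH_GRADIENT, 1, 20,
                           param1=50, param2=30, minRadius=200, maxRadius=280)

if circles is None:
    # Assumption: retry with radius bounds proportional to the image width
    # instead of the fixed 200-280 range used above.
    min_r = int(0.25 * image1.shape[1])
    max_r = int(0.50 * image1.shape[1])
    circles = cv2.HoughCircles(edged, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=50, param2=30,
                               minRadius=min_r, maxRadius=max_r)

if circles is not None:
    circles = np.uint16(np.around(circles))
    max_circle = max(circles[0, :], key=lambda c: c[2])
else:
    print('No circle found; the parameters need further tuning for this image.')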

First answer: an approach based on preprocessing the image and running an adaptive threshold operation

There may be other ways to approach this problem that are not based on Hough circles. Here are the results of one such approach:

  • Preprocess the image! Downscaling it and running a blur helps with the segmentation:

  • The segmentation approach uses cv2.adaptiveThreshold() to create a binary image that preserves the most important objects: the center of the reel and the outer edge of the reel. This is an important step since we are only interested in what lies between these two objects. However, life is not perfect and neither is this segmentation: the shadow of the reel on the table became part of the detected binary objects, and the outer edge is not fully connected, as you can see in the resulting image on the right (look at the top-left of the circumference):

  • To join the broken segments, a morphological operation can be performed (see the short sketch after this list):

  • Finally, the entire reel area can be exposed by iterating over the contours of the image above and discarding those whose area is larger than the one expected for the reel. The resulting binary image (on the left) can then be used as a mask to identify the location of the reel on the original image:
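
A side note on the morphology step referenced above: dilating and then eroding with the same kernel is a morphological closing, so the two calls in the source code below could equivalently be replaced by a single cv2.morphologyEx() call. A minimal self-contained sketch follows; the placeholder mask is only for illustration and stands in for the green_mask produced in step 2 of the code.

import cv2
import numpy as np

# Placeholder binary mask standing in for green_mask from step 2 below.
green_mask = np.zeros((100, 100), np.uint8)
cv2.circle(green_mask, (50, 50), 30, 255, 2)

kernel = np.ones((3, 3), np.uint8)

# Dilation followed by erosion with the same kernel...
closed_a = cv2.erode(cv2.dilate(green_mask, kernel, iterations=1), kernel, iterations=1)
# ...is the same operation as a morphological closing.
closed_b = cv2.morphologyEx(green_mask, cv2.MORPH_CLOSE, kernel)

assert (closed_a == closed_b).all()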

Keep in mind that I am not trying to find a universal solution for your problem. I am merely showing that there might be other solutions that do not depend on Hough circles.

Also, this code might need a few adjustments in order to handle more cases.

Source code:

import cv2
import numpy as np
import sys


img = cv2.imread("test_images/reel.jpg")
if (img is None):
    print('!!! Failed imread')
    sys.exit(-1)

# create output image
output_img = img.copy()

# 1. Preprocess the image: downscale to speed up processing and execute a blur
SCALE_FACTOR = 0.5
smaller_img = cv2.resize(img, dsize=(0, 0), fx=SCALE_FACTOR, fy=SCALE_FACTOR)
blur_img = cv2.medianBlur(smaller_img, 9)
cv2.imwrite('reel1_blur_img.png', blur_img)


# 2. Segment the image to identify the 2 most important contours: the center of the reel and the outer edge
gray_img = cv2.cvtColor(blur_img, cv2.COLOR_BGR2GRAY)
img_bin = cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 19, 4)
cv2.imwrite('reel2_img_bin.png', img_bin)

green_mask = np.zeros((img_bin.shape[0], img_bin.shape[1]), np.uint8)
#green_mask = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB) # debug

contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(contours[contourIdx])
    #print('contourIdx=', contourIdx, 'w=', w, 'h=', h, 'area=', area)

    # filter out tiny segments
    if (area < 5000):
        #cv2.fillPoly(green_mask, pts=[cnt], color=(0, 0, 255)) # red
        continue

    # draw green contour (filled)
    #cv2.fillPoly(green_mask, pts=[cnt], color=(0, 255, 0)) # green
    cv2.fillPoly(green_mask, pts=[cnt], color=(255)) # white

    # debug:
    #cv2.imshow('green_mask', green_mask)
    #cv2.waitKey(0)

cv2.imshow('green_mask', green_mask)
cv2.imwrite('reel2_green_mask.png', green_mask)


# 3. Fix mask: join segments nearby
kernel = np.ones((3,3), np.uint8)
img_dilation = cv2.dilate(green_mask, kernel, iterations=1)
green_mask = cv2.erode(img_dilation, kernel, iterations=1)

cv2.imshow('fixed green_mask', green_mask)
cv2.imwrite('reel3_img.png', green_mask)


# 4. Extract the reel area from the green mask
reel_mask = np.zeros((green_mask.shape[0], green_mask.shape[1]), np.uint8)
#reel_mask = cv2.cvtColor(green_mask, cv2.COLOR_GRAY2RGB) # debug

contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(contours[contourIdx])
    print('contourIdx=', contourIdx, 'w=', w, 'h=', h, 'area=', area)

    # filter out larger segments (keep only the reel-sized ones)
    if (area > 110000):
        #cv2.fillPoly(reel_mask, pts=[cnt], color=(0, 0, 255)) # red
        continue

    # draw green contour (filled)
    #cv2.fillPoly(reel_mask, pts=[cnt], color=(0, 255, 0)) # green
    cv2.fillPoly(reel_mask, pts=[cnt], color=(255)) # white

    # debug:
    #cv2.imshow('reel_mask', reel_mask)
    #cv2.waitKey(0)

cv2.imshow('reel_mask', reel_mask)
cv2.imwrite('reel4_reel_mask.png', reel_mask)


# 5. Draw the reel area on the original image
contours, hierarchy = cv2.findContours(reel_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    centers, radius = cv2.minEnclosingCircle(cnt)

    # rescale these values back to the original image size
    centers_orig = (centers[0] // SCALE_FACTOR, centers[1] // SCALE_FACTOR)
    radius_orig = radius // SCALE_FACTOR

print('centers=', centers_orig, 'radius=', radius_orig)
cv2.circle(output_img, (int(centers_orig[0]), int(centers_orig[1])), int(radius_orig), (128,0,255), 5) # magenta

cv2.imshow('output_img', output_img)
cv2.imwrite('reel5_output.png', output_img)

# display just the pixels from the original image
larger_reel_mask = cv2.resize(reel_mask, (int(img.shape[1]), int(img.shape[0])))
output_reel_img = cv2.bitwise_and(img, img, mask=larger_reel_mask)

cv2.imshow('output_reel_img', output_reel_img)
cv2.imwrite('reel5_output_reel.png', output_reel_img)
cv2.waitKey(0)
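
Since the stated goal was to crop the detected object out of the image, the final mask can also drive a crop. The lines below are an addition to the answer's code, not part of it: a minimal sketch that reuses the output_reel_img and larger_reel_mask variables defined above and crops to the bounding box of the mask (the reel6_crop.png filename is only illustrative).

# Addition (not in the original answer): crop the masked image to the
# bounding box of the reel mask computed above.
ys, xs = np.where(larger_reel_mask > 0)
if xs.size > 0:
    x0, x1 = xs.min(), xs.max()
    y0, y1 = ys.min(), ys.max()
    reel_crop = output_reel_img[y0:y1 + 1, x0:x1 + 1]
    cv2.imshow('reel_crop', reel_crop)
    cv2.imwrite('reel6_crop.png', reel_crop)
    cv2.waitKey(0)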