Python 分水岭分割:无法分割某些单元格

Python 分水岭分割:无法分割某些单元格,python,opencv,image-segmentation,scikit-image,Python,Opencv,Image Segmentation,Scikit Image,我试图分割细胞核的图像,但最终得到的结果是分割不足。出现一些大的斑点,理想情况下应该分解成更小的对象,如右边缘的那些,见下文 我能做些什么来处理这件事吗。我想使用分水岭分割(或者skimage或者opencv)来分割这些大细胞,就像右边缘中间的蓝色细胞一样 到目前为止,我的代码如下所示: def segment_dapi(img_in): img = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY) kernel = np.ones((3, 3)

我试图分割细胞核的图像,但最终得到的结果是分割不足。出现一些大的斑点,理想情况下应该分解成更小的对象,如右边缘的那些,见下文

我能做些什么来处理这个问题吗?我想使用分水岭分割(或者
skimage
或者
opencv
)来分割这些大细胞,就像右边缘中间的蓝色细胞一样

到目前为止,我的代码如下所示:

def segment_dapi(img_in):
    """Segment nuclei in a DAPI image via percentile thresholding + watershed.

    Parameters
    ----------
    img_in : BGR uint8 image. Modified in place: watershed border pixels are
        painted yellow and the annotated image is written to
        'watershed_borders.tif'.

    Returns
    -------
    markers : int32 label image; 0 = background/borders, >0 = nucleus labels.
    """
    img = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((3, 3), np.uint8)

    # Parameters: intensity percentile used as the binarization threshold,
    # and the minimum distance-transform value for "sure foreground".
    thresh = 90
    min_size = 5

    # Contrast-stretch the image (MATLAB-style stretchlim/imadjust).
    lims = stretchlim(img)
    img_adj = imadjust(img, lims)

    # Threshold the adjusted image at a percentile of the ORIGINAL image.
    thres_val = np.percentile(img, thresh)
    _, bw_img = cv2.threshold(img_adj, thres_val, 255, cv2.THRESH_BINARY)

    # Morphological opening removes small speckle objects.
    img_obj = cv2.morphologyEx(bw_img, cv2.MORPH_OPEN, kernel, iterations=1)

    # Dilated mask: black points definitely belong to the background.
    bg = cv2.dilate(img_obj, kernel, iterations=1)

    # Pixels far enough from an object border are "sure foreground".
    dist_transform = cv2.distanceTransform(img_obj, cv2.DIST_L2, 3)
    _, fg = cv2.threshold(dist_transform, min_size, 255, cv2.THRESH_BINARY)
    fg = np.uint8(fg)

    # Unknown region = sure background minus sure foreground; the
    # watershed will decide how to label it.
    unknown = cv2.subtract(bg, fg)

    _, markers = cv2.connectedComponents(fg)
    markers = markers + 1  # shift so sure background is 1, never 0
    markers[unknown == 255] = 0  # 0 = region the watershed must resolve

    # markers before watershed: >1 sure foreground, =1 sure background,
    # =0 unknown (to be decided by watershed).
    markers = cv2.watershed(img_in, markers)
    img_in[markers == -1] = [0, 255, 255]  # paint borders yellow
    cv2.imwrite('watershed_borders.tif', img_in)

    # markers after watershed: -1 = borders between objects, 1 = background
    # (labels were shifted by 1 above), >1 = object labels.
    # Shift labels back down and fold borders into the background.
    markers[markers > 0] = markers[markers > 0] - 1
    markers[markers == -1] = 0

    print(markers.max())
    overlay = color.label2rgb(markers, bg_label=0)

    my_dpi = 72
    fig, ax = plt.subplots(figsize=(6000 / my_dpi, 6000 / my_dpi), dpi=my_dpi)
    plt.imshow(overlay)
    ax.set_axis_off()
    plt.tight_layout()
    plt.show()

    return markers



def stretchlim(img):
    """Find per-channel low/high intensity limits for contrast stretching.

    Mimics MATLAB's ``stretchlim`` with a fixed 1%/99% saturation tolerance.

    Parameters
    ----------
    img : uint8 image array, either (H, W) grayscale or (H, W, P).

    Returns
    -------
    lims : (2, P) float array of [low; high] limits scaled to [0, 1].
    """
    nbins = 255
    tol_low = 0.01
    tol_high = 0.99

    # Promote grayscale to a single-channel stack so one loop handles both.
    if np.ndim(img) == 2:
        img = img[:, :, None]

    n_channels = np.shape(img)[2]
    limits = np.zeros([2, n_channels])
    for ch in range(n_channels):
        counts, _ = np.histogram(img[:, :, ch].ravel(), nbins + 1, [0, nbins])
        cdf = np.cumsum(counts) / sum(counts)
        low = np.argmax(cdf > tol_low)     # first bin above the low tolerance
        high = np.argmax(cdf >= tol_high)  # first bin reaching the high tolerance
        if low == high:
            # Degenerate histogram (e.g. a constant channel): fall back to
            # the full [1, nbins] range so callers never divide by zero.
            limits[:, ch] = np.array([1, nbins])
        else:
            limits[:, ch] = np.array([low, high])

    return limits / nbins


def imadjust(img, lims):
    """Apply MATLAB-style intensity adjustment to a uint8 image.

    Parameters
    ----------
    img : uint8 image.
    lims : limits array as produced by ``stretchlim``; only the first two
        flattened values (low/high input limits in [0, 1]) are used.

    Returns
    -------
    uint8 image stretched to the full output range via a lookup table.
    """
    flat_lims = lims.flatten()
    low_in = flat_lims[0]
    high_in = flat_lims[1]
    # Fixed output range and gamma, matching imadjust's defaults.
    low_out, high_out, gamma = 0, 1, 1
    lut = adjustWithLUT(img, low_in, high_in, low_out, high_out, gamma)
    return lut[img].astype(np.uint8)


def adjustWithLUT(img,lowIn,highIn,lowOut,highOut,gamma):
    """Build a 256-entry uint8 lookup table implementing the adjustment.

    Assumes uint8 input (256 possible intensities). The ``img`` argument is
    not read here; it is kept only for signature compatibility with callers.
    """
    identity = np.linspace(0, 1, 256)  # one LUT entry per uint8 intensity
    adjusted = adjustArray(identity, lowIn, highIn, lowOut, highOut, gamma)
    return img_as_ubyte(adjusted)


def adjustArray(img, lIn, hIn, lOut, hOut, g):
    """Map intensities from [lIn, hIn] to [lOut, hOut] with gamma ``g``.

    Implements the core of MATLAB's imadjust:
        out = ((clip(img, lIn, hIn) - lIn) / (hIn - lIn)) ** g
              * (hOut - lOut) + lOut
    """
    # Clip input into [lIn, hIn] so the normalized value stays in [0, 1].
    img = np.maximum(lIn, np.minimum(hIn, img))
    normalized = ((img - lIn) / (hIn - lIn)) ** g
    # BUG FIX: the output range was previously applied as an exponent
    # (`out ** (hOut - lOut)`) instead of a scale factor. With the default
    # lOut=0, hOut=1 the two coincide (x**1 == x*1), but for any other
    # output range the old formula produced wrong values.
    return normalized * (hOut - lOut) + lOut

你可以花很长时间来找到精确的参数来很好地分割这幅图像,但根据我的经验,这是很挑剔的,而且对于你想要分割的下一幅图像来说是行不通的。这些天,我建议你使用一个预先训练好的深度学习网络,比如