Python 'cv2.FlannBasedMatcher' object has no attribute 'knnMatches'

Tags: python, image, opencv, image-processing, spyder

This code is meant to implement a SIFT-based detection pipeline with a FLANN matcher on an image captured from a webcam. For some reason the error is raised in knnMatch, where the captured image is processed; the line that raises the error is marked in the code below. It would be great if someone could suggest a solution for this problem; please ask in the comments for more details.

import cv2
import numpy as np

MIN_MATCH_COUNT = 30

detector = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDITREE = 0
flannParam = dict(algorithm=FLANN_INDEX_KDITREE,tree=5)
searchParam = dict(check = 50)
flann=cv2.FlannBasedMatcher(flannParam,searchParam)

trainImg=cv2.imread("E:\\EXCHANGE_Courses\\training_img1.jpg")
trainImg1 = cv2.cvtColor(trainImg,cv2.COLOR_BGR2GRAY)
trainKP,trainDecs = detector.detectAndCompute(trainImg1,None)

cam = cv2.VideoCapture(1)
print(cam.isOpened())

for i in range(1):
    return_value, image = cam.read()
    cv2.imwrite('capture'+str(i)+'.jpg', image)
del(cam)


while True:
   
   
    QImage = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
  
    
    queryKP,queryDesc = detector.detectAndCompute(QImage,None)
   # Now match the key descriptions from the training image and the query image
    # np.asarray(des1,np.float32),np.asarray(des2,np.float32),k=2
     #   queryDesc,trainDecs, k=2
    matches=flann.knnMatches(queryDesc,trainDecs, k=2)   # <-- this line raises the AttributeError
    print("upper part clear")
    # Filter the pool of keypoints as we need to collect the key points of interest only with the object in mind
    goodMatch=[]
    for m,n in matches:

        if(m.distance<0.75*n.distance):
            goodMatch.append(m)
            print("all ok here")
        
    if(len(goodMatch)>MIN_MATCH_COUNT):
            tp=[]
            qp=[]
            for m in goodMatch:
                tp.append(trainKP[m.trainIdx].pt)
                qp.append(queryKP[m.queryIdx].pt)
                tp,qp = np.float32((tp,qp))
                H,status = cv2.findHomography(tp,qp,cv2.RANSAC,3.0)
                h,w=trainImg.shape
                
                trainBorder = np.float32([[[0,0],[0,h-1],[w-1,h-1],[0,w-1]]])
                queryBorder = cv2.perspectiveTransform(trainBorder,H)
                # changed QImageBGR to image
                cv2.polylines(QImage,[np.uint8(queryBorder)],True,(0,255,0),3)
    else:
            print("Not enough matches - %d/%d" %len(goodMatch),MIN_MATCH_COUNT)
            cv2.imshow('results',QImage)
             #print ("Not enough matches are found - %d/%d" % (len(goodMatch),MIN_MATCH_COUNT))
             #matchesMask = None
             #draw_params = dict(matchColor = (0,255,0), # draw matches in green color
             #      singlePointColor = None,
                  # matchesMask = matchesMask, # draw only inliers
                  # flags = 2)

#img3 = cv2.drawMatches(trainImg1,trainKP,QImage,queryKP,goodMatch,None,**draw_params)

#plt.imshow(img3, 'gray'),plt.show()

            if cv2.waitKey(10)==ord('q'):
                break
#cam.release()
#cv2.destroyAllWindows()

A bit late to the party, but I guess you meant knnMatch rather than knnMatches.

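A minimal sketch of the corrected call, reusing the `flann`, `queryDesc`, and `trainDecs` objects from the question; the only real change is the method name. The float32 cast, already hinted at in the question's commented-out line, is kept as a precaution since FLANN matching works on float32 descriptors:

    import numpy as np

    # knnMatch (no trailing "es") is the actual method on cv2.FlannBasedMatcher
    matches = flann.knnMatch(np.asarray(queryDesc, np.float32),
                             np.asarray(trainDecs, np.float32),
                             k=2)

    # Lowe's ratio test, the same filtering the question's loop performs
    goodMatch = [m for m, n in matches if m.distance < 0.75 * n.distance]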