
Python: finding the wrong homography

Tags: python, opencv, matrix, homography, image-stitching

I am trying to do an image stitching project with OpenCV in Python, in which I track points between video frames with the Lucas-Kanade algorithm to compute point matches and then find the homography matrix from them. After writing the program and stitching the video frames together, I decided to run a test in which I simply display the perspective-warped version of each image on a black canvas, to see how the homography matrix warps them. When I did this, instead of shifting a small amount between frames, the frames were translated farther and farther, far beyond the slight movement that should occur between frames:

[--------------------------------------------------------------blank----------------------------------------------------]

[Frame0------------------------------------------------------------------------------------------------------------------------]

[-------------Frame1--------------------------------------------------------------------------------------------]

[---------------------------------------Frame2---------------------------------------------------------------------]

[--------------------------------------------------------------------------------------Frame3----------------]

Subsequent frames end up moving entirely out of view. I do not understand why this happens. I perform a back-projection error check to make sure that only points with accurate optical-flow calculations are passed on. I also set the reprojection threshold of findHomography to 10, then 1, then 0.5, all to no effect. Since I am stitching multiple images together, I multiply my homography matrices between frames, which seems to compound the error. Why does this happen, and how can I fix my homography matrices? Here is my code (ignore the commented-out tests; also, some of the indentation formatting may have been messed up when copying it to this forum):

import numpy as np
import sys
import cv2
import math

lastFeatures = None
currentFeatures = None
opticFlow = None
panRow = None
Rows = None
finalPanorama = None

def loadRow(dirPath, fType, numImages,  column):
    imageRow = []
    for i in range(0, numImages):
            imageRow.append(cv2.imread("%s/%i_%i.%s" % (dirPath, column, i, fType), cv2.IMREAD_COLOR))
    return imageRow

def findNthFeatures(prevImg, prevPnts, nxtImg):

    back_threshold = 0.5

    nxtDescriptors = []
    prevGrey = None
    nxtGrey = None
    nxtPnts = prevPnts[:]

    prevGrey = cv2.cvtColor(prevImg, cv2.COLOR_BGR2GRAY)
    nxtGrey = cv2.cvtColor(nxtImg, cv2.COLOR_BGR2GRAY)

    lucasKanadeParams = dict(winSize = (19,19), maxLevel = 100, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    nxtPnts, status, err = cv2.calcOpticalFlowPyrLK(prevGrey, nxtGrey, prevPnts, None, **lucasKanadeParams)
    backProjections, status, err = cv2.calcOpticalFlowPyrLK(nxtGrey, prevGrey, nxtPnts, None, **lucasKanadeParams)
    d = abs(prevPnts - backProjections).reshape(-1, 2).max(-1)
    status = d < back_threshold
    goodNew = nxtPnts[status].copy()
    goodLast = prevPnts[status].copy()

    return goodLast, goodNew

def getHomographies(videoName):
    color = np.random.randint(0,255,(100,3))    
    lastFrame = None
    currentFrame = None
    lastKeypoints = None
    currentKeypoints = None
    firstImage = True
    featureRefreshRate = 5

    feature_params = dict( maxCorners = 100,
                        qualityLevel = 0.1,
                        minDistance = 8,
                        blockSize = 15)

    frameCount = 0

    Homographies = []

    cv2.namedWindow('display', cv2.WINDOW_NORMAL) 
    cap = cv2.VideoCapture(videoName)
    flags, frame = cap.read()

    while flags:
        if firstImage:
            firstImage = False
            lastFrame = frame[:,:].copy()
            lastGray = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
            lastKeypoints = cv2.goodFeaturesToTrack(lastGray, mask = None, **feature_params)
            flags, frame = cap.read()
            frameCount += 1
        else:
            mask = np.zeros_like(lastFrame)
            currentFrame = frame[:,:].copy()
            frameCount += 1

            lastKeypoints, currentKeypoints = findNthFeatures(lastFrame, lastKeypoints, currentFrame)
#            for i,(new,old) in enumerate(zip(currentKeypoints, lastKeypoints)):
#                a, b = new.ravel()
#                c, d = old.ravel()
#                mask = cv2.line(mask, (a,b), (c,d), color[i].tolist(), 2)
#                frame = cv2.circle(frame, (a,b), 5, color[i].tolist(), -1)
#            img = cv2.add(frame,mask)
#            cv2.imshow('display', img)
#            cv2.waitKey(0)

            homographyMatrix, homographyStatus = cv2.findHomography(currentKeypoints, lastKeypoints, cv2.RANSAC, 0.5)
            Homographies.append(homographyMatrix)

            lastFrame = currentFrame
            lastKeypoints = currentKeypoints

            if frameCount % featureRefreshRate == 0:
                grayBuf = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
                lastKeypoints = cv2.goodFeaturesToTrack(grayBuf, mask = None, **feature_params)

            flags, frame = cap.read()

    return Homographies

def stitchRow(videoName):

    cv2.namedWindow('display', cv2.WINDOW_NORMAL)

    frameCount = 0
    cap = cv2.VideoCapture(videoName)
    ret, initialImage = cap.read()
    homographyMatrices = []
    homographyMatrices = getHomographies(videoName)
    warpHMat = homographyMatrices[frameCount]

    while ret:
        ret, nextImg = cap.read()
        frameCount += 1
        result = cv2.warpPerspective(nextImg, warpHMat, (initialImage.shape[1] + nextImg.shape[1], nextImg.shape[0]))
        #result[0:initialImage.shape[0], 0:initialImage.shape[1]] = initialImage
        cv2.imshow('display', result)
        cv2.waitKey(0)
#        cv2.imshow('display', initialImage)
#        cv2.waitKey(0)
        warpHMat = homographyMatrices[frameCount]

        for j in range(frameCount, 0, -1):
            warpHMat = warpHMat * homographyMatrices[j-1]

#        initialImage = result[:, :].copy()

stitchRow(sys.argv[1])
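
Since the question describes accumulating the per-frame homographies by multiplying them between frames, below is a minimal, standalone sketch of just that composition step, assuming the matrices are the 3x3 numpy arrays returned by cv2.findHomography. The helper name accumulate_homographies is illustrative and not part of the original code; note that for numpy arrays `@` (or np.dot) is matrix multiplication, while `*` is element-wise.

import numpy as np

def accumulate_homographies(homographies):
    # homographies[i] is assumed to map frame i+1 into frame i's coordinates,
    # as returned by cv2.findHomography(currentKeypoints, lastKeypoints, ...).
    accumulated = []
    H_to_first = np.eye(3)
    for H in homographies:
        # Chain with matrix multiplication so each new frame maps into frame 0.
        H_to_first = H_to_first @ H
        # Homographies are defined up to scale; keep the bottom-right entry at 1.
        accumulated.append(H_to_first / H_to_first[2, 2])
    return accumulated

With such a list, each frame could be warped once with cv2.warpPerspective onto the shared canvas, rather than rebuilding the whole product inside the display loop.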