Java OpenCV图像坐标到世界坐标的转换问题
我正在尝试使用单目相机拍摄对象的图像，流程如下：首先对相机进行常规校准；然后使用世界坐标系和相机坐标系中的已知对象坐标对相机进行透视校准。在这两个步骤之后，我应该能够获得帧中任何检测到的对象的世界坐标。参考文章以更详细的方式解释了我要做的事情，但与链接不同，我不是将 OpenCV 与 Python 结合使用，而是与 Java 结合使用。到目前为止，我已经成功地完成了常规校准，获得了相机的内参矩阵和旋转/平移向量。我在 solvePnPRansac() 这个 OpenCV 函数中使用了这些参数，以获得相机的外参矩阵，从而可以构建投影矩阵。
// --- Build 3D world / 2D image correspondences and solve for the extrinsics ---
// Principal point (cx, cy) taken from the optimal camera matrix; it is paired
// with the hand-measured reference centre of the object below.
double[] cx = this.optMat.get(0, 2);
double[] cy = this.optMat.get(1, 2);
// World coordinates of the reference centre point.
// NOTE(review): units are not shown here — confirm they match the other points.
int realCenterX = 258;
int realCenterY = 250;
int realCenterZ = 453;
MatOfPoint3f worldPoints = new MatOfPoint3f();
MatOfPoint2f imagePoints = new MatOfPoint2f();
// 3D object points measured in the world frame (reference centre first).
List<Point3> objPoints = new ArrayList<Point3>();
objPoints.add(new Point3(realCenterX, realCenterY, realCenterZ));
objPoints.add(new Point3(154, 169, 475));
objPoints.add(new Point3(244, 169, 470));
objPoints.add(new Point3(337, 169, 470));
objPoints.add(new Point3(154, 240, 469));
objPoints.add(new Point3(244, 240, 452));
objPoints.add(new Point3(337, 240, 462));
objPoints.add(new Point3(154, 310, 472));
objPoints.add(new Point3(244, 310, 460));
objPoints.add(new Point3(337, 310, 468));
worldPoints.fromList(objPoints);
// Matching 2D image points in the same order (principal point first).
List<Point> imgPoints = new ArrayList<Point>();
imgPoints.add(new Point(cx[0], cy[0]));
imgPoints.add(new Point(569, 99));
imgPoints.add(new Point(421, 100));
imgPoints.add(new Point(272, 100));
imgPoints.add(new Point(571, 212));
imgPoints.add(new Point(422, 213));
imgPoints.add(new Point(273, 214));
imgPoints.add(new Point(574, 327));
imgPoints.add(new Point(423, 328));
imgPoints.add(new Point(273, 330));
imagePoints.fromList(imgPoints);
// Re-express each point's Z as the straight-line distance from the reference
// centre: wZ = sqrt(wD^2 + D1^2), where D1 is the in-plane distance to the centre.
for (int i = 0; i < worldPoints.rows(); i++) {
    for (int j = 0; j < worldPoints.cols(); j++) {
        double[] pointI = worldPoints.get(i, j);
        double wX = pointI[0] - realCenterX;
        double wY = pointI[1] - realCenterY;
        double wD = pointI[2];
        // BUG FIX: the original computed sqrt((wX*wX)+(wY+wY)) — addition
        // instead of squaring — which corrupted every adjusted Z value.
        double D1 = Math.sqrt((wX * wX) + (wY * wY));
        double wZ = Math.sqrt((wD * wD) + (D1 * D1));
        pointI[2] = wZ;
        worldPoints.put(i, j, pointI);
    }
}
Mat optMatInv = new Mat();
Core.invert(this.optMat, optMatInv);
// Solve for the extrinsics: useExtrinsicGuess=true, 100 RANSAC iterations,
// 0.5 px reprojection threshold, 0.99 confidence.
Calib3d.solvePnPRansac(worldPoints, imagePoints, optMat, distCoeffs, rvecsPnP, tvecsPnP, true, 100, 0.5f, 0.99, new Mat(), Calib3d.SOLVEPNP_ITERATIVE);
// Rotation vector -> 3x3 rotation matrix.
Calib3d.Rodrigues(this.rvecsPnP, this.rodriguesVecs);
// Extrinsic matrix [R | t] (3x4). hconcat allocates/overwrites its destination,
// so the previous rodriguesVecs.copyTo(extrinsicMat) was redundant and removed.
List<Mat> concat = new ArrayList<Mat>();
concat.add(this.rodriguesVecs);
concat.add(this.tvecsPnP);
Core.hconcat(concat, this.extrinsicMat);
// Projection matrix P = M * [R | t].
Core.gemm(this.optMat, this.extrinsicMat, 1, new Mat(), 0, this.projectionMat);
int nbOfElements = worldPoints.rows() * worldPoints.cols();
// Per-point scale factors s (filled elsewhere) for the back-projection step.
List<Double> sDescribe = new ArrayList<Double>();
从这一步开始,我就具备了执行第一个链接中显示的两个操作所需的一切:
这就是事情变得复杂的地方。当我使用世界系统坐标时,我获得了正确的图像系统坐标,正如通过对象检测算法获得的那样(+-几个像素)。然而,当我尝试执行第二个操作时,得到的结果毫无意义。首先是我用来获取外部参数的代码:
// --- Build 3D world / 2D image correspondences and solve for the extrinsics ---
// Principal point (cx, cy) taken from the optimal camera matrix; it is paired
// with the hand-measured reference centre of the object below.
double[] cx = this.optMat.get(0, 2);
double[] cy = this.optMat.get(1, 2);
// World coordinates of the reference centre point.
// NOTE(review): units are not shown here — confirm they match the other points.
int realCenterX = 258;
int realCenterY = 250;
int realCenterZ = 453;
MatOfPoint3f worldPoints = new MatOfPoint3f();
MatOfPoint2f imagePoints = new MatOfPoint2f();
// 3D object points measured in the world frame (reference centre first).
List<Point3> objPoints = new ArrayList<Point3>();
objPoints.add(new Point3(realCenterX, realCenterY, realCenterZ));
objPoints.add(new Point3(154, 169, 475));
objPoints.add(new Point3(244, 169, 470));
objPoints.add(new Point3(337, 169, 470));
objPoints.add(new Point3(154, 240, 469));
objPoints.add(new Point3(244, 240, 452));
objPoints.add(new Point3(337, 240, 462));
objPoints.add(new Point3(154, 310, 472));
objPoints.add(new Point3(244, 310, 460));
objPoints.add(new Point3(337, 310, 468));
worldPoints.fromList(objPoints);
// Matching 2D image points in the same order (principal point first).
List<Point> imgPoints = new ArrayList<Point>();
imgPoints.add(new Point(cx[0], cy[0]));
imgPoints.add(new Point(569, 99));
imgPoints.add(new Point(421, 100));
imgPoints.add(new Point(272, 100));
imgPoints.add(new Point(571, 212));
imgPoints.add(new Point(422, 213));
imgPoints.add(new Point(273, 214));
imgPoints.add(new Point(574, 327));
imgPoints.add(new Point(423, 328));
imgPoints.add(new Point(273, 330));
imagePoints.fromList(imgPoints);
// Re-express each point's Z as the straight-line distance from the reference
// centre: wZ = sqrt(wD^2 + D1^2), where D1 is the in-plane distance to the centre.
for (int i = 0; i < worldPoints.rows(); i++) {
    for (int j = 0; j < worldPoints.cols(); j++) {
        double[] pointI = worldPoints.get(i, j);
        double wX = pointI[0] - realCenterX;
        double wY = pointI[1] - realCenterY;
        double wD = pointI[2];
        // BUG FIX: the original computed sqrt((wX*wX)+(wY+wY)) — addition
        // instead of squaring — which corrupted every adjusted Z value.
        double D1 = Math.sqrt((wX * wX) + (wY * wY));
        double wZ = Math.sqrt((wD * wD) + (D1 * D1));
        pointI[2] = wZ;
        worldPoints.put(i, j, pointI);
    }
}
Mat optMatInv = new Mat();
Core.invert(this.optMat, optMatInv);
// Solve for the extrinsics: useExtrinsicGuess=true, 100 RANSAC iterations,
// 0.5 px reprojection threshold, 0.99 confidence.
Calib3d.solvePnPRansac(worldPoints, imagePoints, optMat, distCoeffs, rvecsPnP, tvecsPnP, true, 100, 0.5f, 0.99, new Mat(), Calib3d.SOLVEPNP_ITERATIVE);
// Rotation vector -> 3x3 rotation matrix.
Calib3d.Rodrigues(this.rvecsPnP, this.rodriguesVecs);
// Extrinsic matrix [R | t] (3x4). hconcat allocates/overwrites its destination,
// so the previous rodriguesVecs.copyTo(extrinsicMat) was redundant and removed.
List<Mat> concat = new ArrayList<Mat>();
concat.add(this.rodriguesVecs);
concat.add(this.tvecsPnP);
Core.hconcat(concat, this.extrinsicMat);
// Projection matrix P = M * [R | t].
Core.gemm(this.optMat, this.extrinsicMat, 1, new Mat(), 0, this.projectionMat);
int nbOfElements = worldPoints.rows() * worldPoints.cols();
// Per-point scale factors s (filled elsewhere) for the back-projection step.
List<Double> sDescribe = new ArrayList<Double>();
double[] cx = this.optMat.get(0, 2);
double[] cy = this.optMat.get(1, 2);
int realCenterX = 258;
int realCenterY = 250;
int realCenterZ = 453;
MatOfPoint3f worldPoints = new MatOfPoint3f();
MatOfPoint2f imagePoints = new MatOfPoint2f();
List<Point3> objPoints = new ArrayList<Point3>();
objPoints.add(new Point3(realCenterX, realCenterY, realCenterZ));
objPoints.add(new Point3(154, 169, 475));
objPoints.add(new Point3(244, 169, 470));
objPoints.add(new Point3(337, 169, 470));
objPoints.add(new Point3(154, 240, 469));
objPoints.add(new Point3(244, 240, 452));
objPoints.add(new Point3(337, 240, 462));
objPoints.add(new Point3(154, 310, 472));
objPoints.add(new Point3(244, 310, 460));
objPoints.add(new Point3(337, 310, 468));
worldPoints.fromList(objPoints);
List<Point> imgPoints = new ArrayList<Point>();
imgPoints.add(new Point(cx[0], cy[0]));
imgPoints.add(new Point(569, 99));
imgPoints.add(new Point(421, 100));
imgPoints.add(new Point(272, 100));
imgPoints.add(new Point(571, 212));
imgPoints.add(new Point(422, 213));
imgPoints.add(new Point(273, 214));
imgPoints.add(new Point(574, 327));
imgPoints.add(new Point(423, 328));
imgPoints.add(new Point(273, 330));
imagePoints.fromList(imgPoints);
for (int i = 0; i < worldPoints.rows(); i++) {
// --- Back-project each 2D image point to 3D world coordinates ---
// Pinhole model inverted: X_world = R^-1 * (M^-1 * s*[u,v,1]^T - t).
// Loop-invariant matrices are hoisted out of the loop (the original
// recomputed the camera-matrix inverse on every iteration).
Mat invCameraMatrix = new Mat(3, 3, CvType.CV_64F);
Core.invert(this.optMat, invCameraMatrix);
// A rotation matrix is orthonormal, so its inverse is exactly its
// transpose — cheaper and numerically exact compared with Core.invert.
Mat inverseRMat = new Mat();
Core.transpose(this.rodriguesVecs, inverseRMat);
for (int i = 0; i < nbOfElements; i++) {
    double[] pointArray = imagePoints.get(i, 0);
    double sPoint = sDescribe.get(i);
    // Homogeneous image point [u, v, 1]^T kept as a 3x1 COLUMN vector.
    // BUG FIX: the original transposed it into a row vector and multiplied
    // with the matrices on the wrong side (p^T * M^-1 == (M^-T * p)^T),
    // which produced meaningless world coordinates.
    Mat pointI = new Mat(3, 1, CvType.CV_64F);
    pointI.put(0, 0, pointArray[0]);
    pointI.put(1, 0, pointArray[1]);
    pointI.put(2, 0, 1);
    // s * [u, v, 1]^T
    Mat sUV = new Mat(3, 1, CvType.CV_64F);
    Core.multiply(pointI, new Scalar(sPoint), sUV);
    // M^-1 * (s * [u, v, 1]^T): matrix on the LEFT of the column vector.
    Mat tmp1 = new Mat();
    Core.gemm(invCameraMatrix, sUV, 1, new Mat(), 0, tmp1);
    // Subtract the translation vector directly (3x1 column, no transpose).
    Mat tmp2 = new Mat();
    Core.subtract(tmp1, this.tvecsPnP, tmp2);
    // Rotate back into the world frame.
    Mat XYZ = new Mat();
    Core.gemm(inverseRMat, tmp2, 1, new Mat(), 0, XYZ);
}