Geometry 将jMonkeyEngine和Vuforia结合起来:几何体不会停留在目标的中心

Geometry 将jMonkeyEngine和Vuforia结合起来:几何体不会停留在目标的中心,geometry,target,centering,vuforia,jmonkeyengine,Geometry,Target,Centering,Vuforia,Jmonkeyengine,我正在尝试结合Vuforia SDK和jMonkeyEngine。到目前为止,立方体已放置在目标(ImageTarget)上。但当我移动相机时,立方体也会移动一点。我希望立方体保持在目标的中心(就像VuforiaSamples ImageTarget中的茶壶)。你知道我怎样才能解决这个问题吗 我认为这是相关的代码: public void initForegroundCamera() { foregroundCamera = new Camera(settings.getWidth()

我正在尝试结合Vuforia SDK和jMonkeyEngine。到目前为止,立方体已放置在目标(ImageTarget)上。但当我移动相机时,立方体也会移动一点。我希望立方体保持在目标的中心(就像VuforiaSamples ImageTarget中的茶壶)。你知道我怎样才能解决这个问题吗?

我认为这是相关的代码:

// Builds the jME foreground camera so that its frustum matches the physical
// camera calibration reported by Vuforia, then registers a viewport for it.
// Without this match, rendered 3D content drifts against the video background.
public void initForegroundCamera()
{
    foregroundCamera = new Camera(settings.getWidth(), settings.getHeight());
    foregroundCamera.setLocation(new Vector3f(0.0f, 0.0f, 0.0f));

    // Perspective parameters come from the device calibration and from the
    // video-background configuration that Vuforia is currently rendering.
    CameraCalibration calib = CameraDevice.getInstance().getCameraCalibration();
    VideoBackgroundConfig videoConfig = Renderer.getInstance().getVideoBackgroundConfig();

    float vpWidth  = videoConfig.getSize().getData()[0];
    float vpHeight = videoConfig.getSize().getData()[1];

    float camWidth  = calib.getSize().getData()[0];
    float camHeight = calib.getSize().getData()[1];

    float screenWidth  = settings.getWidth();
    float screenHeight = settings.getHeight();

    Vec2F size = new Vec2F(camWidth, camHeight);
    Vec2F focalLength = calib.getFocalLength();

    // Vertical field of view derived from the calibrated focal length:
    // fov = 2 * atan((h/2) / fy), then converted to degrees for jME.
    float fovRadians = 2 * (float) Math.atan(0.5f * (size.getData()[1] / focalLength.getData()[1]));
    float fovDegrees = fovRadians * 180.0f / (float) Math.PI;
    float aspectRatio = (size.getData()[0] / size.getData()[1]);

    // Compensate for the video-background viewport not matching the physical
    // screen size in either dimension.
    float viewportDistort = 1.0f;

    if (vpWidth != screenWidth)
    {
        viewportDistort = vpWidth / screenWidth;
        fovDegrees = fovDegrees * viewportDistort;
        aspectRatio = aspectRatio / viewportDistort;
        Log.v(TAG, "viewportDistort: " + viewportDistort + " fovDegreed: " + fovDegrees + " aspectRatio: " + aspectRatio); 
    }

    if (vpHeight != screenHeight)
    {
        viewportDistort = vpHeight / screenHeight;
        fovDegrees = fovDegrees / viewportDistort;
        aspectRatio = aspectRatio * viewportDistort;
        Log.v(TAG, "viewportDistort: " + viewportDistort + " fovDegreed: " + fovDegrees + " aspectRatio: " + aspectRatio); 
    }

    setCameraPerspectiveFromVuforia(fovDegrees, aspectRatio);
    setCameraViewportFromVuforia(vpWidth, vpHeight, camWidth, camHeight);

    // The foreground view shares the scene graph; only the depth buffer is
    // cleared (flags: color=false, depth=true, stencil=false) so the camera
    // video rendered behind it stays visible.
    ViewPort foregroundViewPort = renderManager.createMainView("ForegroundView", foregroundCamera);
    foregroundViewPort.attachScene(rootNode);
    foregroundViewPort.setClearFlags(false, true, false);
    foregroundViewPort.setBackgroundColor(ColorRGBA.Blue);

    sceneInitialized = true;
}

// Reveals the 3D model bound to the detected trackable and aligns the jME
// camera with the pose Vuforia reports for it. The unused index parameter is
// kept for interface compatibility with existing callers.
private void ProcessTrackable(TrackableResult result, int i)
{
    // Un-hide the model that corresponds to the found target.
    Spatial model = rootNode.getChild(0);
    model.setCullHint(CullHint.Dynamic);

    // The pose is a model-view matrix; inverting and then transposing it
    // yields the camera transform expressed in target space.
    Matrix44F pose = Tool.convertPose2GLMatrix(result.getPose());
    Matrix44F cameraMatrix =
            MathHelpers.Matrix44FTranspose(MathHelpers.Matrix44FInverse(pose));
    float[] m = cameraMatrix.getData();

    // Elements 12..14 carry the camera position; 0..2 / 4..6 / 8..10 carry
    // the right / up / direction basis vectors respectively.
    setCameraPoseFromVuforia(m[12], m[13], m[14]);
    setCameraOrientationFromVuforia(
            m[0], m[1], m[2],
            m[4], m[5], m[6],
            m[8], m[9], m[10]);
}

// Applies the Vuforia-derived field of view and aspect ratio to the jME
// camera frustum. Near/far planes are fixed at 1 and 1000 world units.
private void setCameraPerspectiveFromVuforia(float fovY, float aspectRatio) 
{
    final float near = 1.0f;
    final float far = 1000.0f;
    foregroundCamera.setFrustumPerspective(fovY, aspectRatio, near, far);
    foregroundCamera.update();
}

// Moves the jME camera to the position extracted from the Vuforia pose.
private void setCameraPoseFromVuforia(float camX, float camY, float camZ)
{
    Vector3f position = new Vector3f(camX, camY, camZ);
    foregroundCamera.setLocation(position);
    foregroundCamera.update();
}

// Orients the jME camera from the Vuforia basis vectors. The right and up
// vectors are negated to convert between the two engines' axis conventions;
// the view direction keeps its sign.
private void setCameraOrientationFromVuforia(float camRightX, float camRightY, float camRightZ, float camUpX, float camUpY, float camUpZ, float camDirX, float camDirY, float camDirZ)
{
    Vector3f left = new Vector3f(-camRightX, -camRightY, -camRightZ);
    Vector3f up = new Vector3f(-camUpX, -camUpY, -camUpZ);
    Vector3f direction = new Vector3f(camDirX, camDirY, camDirZ);
    foregroundCamera.setAxes(left, up, direction);
    foregroundCamera.update();
}

我还使用JMonkey引擎实现了Vuforia。我必须承认,即使在我拿着手机不动的时候,3D模型的晃动也是显而易见的,而仅仅使用OpenGL渲染引擎和Vuforia并不会产生这样的结果。


原因可能是屏幕上渲染的是四边形,纹理是从相机输出的,这也需要一段时间来更改每一帧。此外,在运行我的应用程序时,手机会变得非常热,所以我想这对处理器来说也是一个沉重的负担。

我还使用JMonkey引擎实现了Vuforia。我必须承认,即使在我拿着手机不动的时候,3D模型的晃动也是显而易见的,而仅仅使用OpenGl渲染引擎和Vuforia并不能产生这样的结果

原因可能是屏幕上渲染的是四边形,纹理是从相机输出的,这也需要一段时间来更改每一帧。此外,在运行我的应用程序时,手机会变得非常热,所以我想这对处理器来说也是一个沉重的负担