Warning: file_get_contents(/data/phpspider/zhask/data//catemap/8/.htaccess/5.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
在opengl es 1中,我试图为android应用程序实现一个光线选择器_Android_Opengl Es - Fatal编程技术网

在opengl es 1中,我试图为android应用程序实现一个光线选择器

在opengl es 1中,我试图为android应用程序实现一个光线选择器,android,opengl-es,Android,Opengl Es,我试图在我的android应用程序中实现光线拾取,我一直在使用它作为参考指南。当我导入类并从站点复制一些类时,代码中会出现错误。他们也没有很好地解释将代码放在哪里。稍后我会将所有内容转换为更新的Opengl版本,我只是想得到一个工作代码并理解它。有很多代码,但我会列出我有什么。我意识到这可能是一篇重复的文章,但我浏览了所有的网页,找到了很多关于这个主题的信息,但是没有任何我可以作为例子来理解它 public class CadActivity extends ActionBarActivity

我试图在我的android应用程序中实现光线拾取,我一直在使用它作为参考指南。当我导入类并从站点复制一些类时,代码中会出现错误。他们也没有很好地解释将代码放在哪里。稍后我会将所有内容转换为更新的Opengl版本,我只是想得到一个工作代码并理解它。有很多代码,但我会列出我有什么。我意识到这可能是一篇重复的文章,但我浏览了所有的网页,找到了很多关于这个主题的信息,但是没有任何我可以作为例子来理解它

public class CadActivity extends ActionBarActivity  {

GLSurfaceView cadLayout;
// NOTE: these must NOT be initialized at field-declaration time.
// Field initializers run during construction, before onCreate() and
// setContentView(), so findViewById() there crashes (or returns null).
TextView xV;
TextView yV;
TextView zV;

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);

    cadLayout = new CadGlSurfaceView(this);
    // Wrap the GL interface so MatrixTrackingGL can record the current
    // model-view / projection matrices; MatrixGrabber reads them back
    // later for gluUnProject-based ray picking.
    cadLayout.setGLWrapper(new GLSurfaceView.GLWrapper() {
        @Override
        public GL wrap(GL gl) {
            return new MatrixTrackingGL(gl);
        }
    });

    setContentView(R.layout.activity_cad);

    // View lookups are only valid after setContentView().
    xV = (TextView) findViewById(R.id.x);
    yV = (TextView) findViewById(R.id.y);
    zV = (TextView) findViewById(R.id.z);

    RelativeLayout v = (RelativeLayout) findViewById(R.id.surfacegl);
    v.addView(cadLayout);
}
上面的代码是我使用GL包装器的主要活动

以下代码是我的 GLSurfaceView 子类:

public class CadGlSurfaceView extends GLSurfaceView {

// Current touch position (x, y), gesture start (sX, sY) and gesture
// end (fX, fY), all in screen pixel coordinates.
float x, y, sX, sY, fX, fY;
CadActivity main;

public CadGlSurfaceView(Context context) {
    super(context);
    setRenderer(new CadRenderer());
}

/**
 * Tracks the touch position for later use by the ray picker.
 * Returns true for every event so the full DOWN/MOVE/UP stream is delivered.
 */
@Override
public boolean onTouchEvent(MotionEvent event) {
    // Every branch needs the current position, so read it once here;
    // the original repeated event.getX()/getY() in every case (dead code).
    x = event.getX();
    y = event.getY();

    switch (event.getActionMasked()) {
        case MotionEvent.ACTION_DOWN:
            // Remember where the gesture started.
            sX = x;
            sY = y;
            break;
        case MotionEvent.ACTION_UP:
            // Remember where the gesture ended.
            fX = x;
            fY = y;
            break;
        default:
            // ACTION_MOVE and others: position already updated above.
            break;
    }
    return true;
  }
}
下面的代码是我的渲染器:

public class CadRenderer implements GLSurfaceView.Renderer {

private GlObjects objects;
MatrixGrabber matrixGrabber = new MatrixGrabber();
public CadRenderer(){
    objects = new GlObjects();

}



@Override
public void onSurfaceCreated(GL10 gl, EGLConfig eglconfig) {
    gl.glDisable(GL10.GL_DITHER);
    gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT,GL10.GL_FASTEST);
    gl.glClearColor(.8f,0f,.2f,1f);
    gl.glClearDepthf(1f);
}

@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
    gl.glViewport(0,0,width,height);
    int[] viewport = {0, 0, width, height};
    float ratio = (float) width/height;
    gl.glMatrixMode(GL10.GL_PROJECTION);
    gl.glLoadIdentity();
    gl.glFrustumf(-ratio,ratio,-1,1f,1,25);
    matrixGrabber.getCurrentState(gl);
    matrixGrabber.mModelView;
    matrixGrabber.mProjection;

}

@Override
public void onDrawFrame(GL10 gl) {
    gl.glDisable(GL10.GL_DITHER);
    gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);

    gl.glMatrixMode(GL10.GL_MODELVIEW);
    gl.glLoadIdentity();
    GLU.gluLookAt(gl,0,0,-5,0,0,0,0,2,0);

    objects.draw(gl);

  }
}
matrixGrabber.mModelView; 和 matrixGrabber.mProjection; 这两行代码下面有一条红线,提示编译错误(not a statement,不是语句)

这是我正在画的对象。我会画线,所以我知道我需要改变一些事情

public class GlObjects {

// One 2D triangle: three (x, y) vertex pairs.
private float vertices[] = {
    0f, 1f,
    1f, -1f,
    -1f, -1f
};

// Index order for glDrawElements.
private short pIndex[] = {0, 1, 2};

private ShortBuffer pBuff;
private FloatBuffer vertBuff;

public GlObjects() {
    // GL needs direct, native-byte-order buffers; 4 bytes per float.
    ByteBuffer vertexBytes = ByteBuffer.allocateDirect(vertices.length * 4);
    vertexBytes.order(ByteOrder.nativeOrder());
    vertBuff = vertexBytes.asFloatBuffer();
    vertBuff.put(vertices);
    vertBuff.position(0);

    // 2 bytes per short for the index buffer.
    ByteBuffer indexBytes = ByteBuffer.allocateDirect(pIndex.length * 2);
    indexBytes.order(ByteOrder.nativeOrder());
    pBuff = indexBytes.asShortBuffer();
    pBuff.put(pIndex);
    pBuff.position(0);
}

/** Draws the indexed triangle with the fixed-function vertex array path. */
public void draw(GL10 gl) {
    gl.glFrontFace(GL10.GL_CW);
    gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
    // 2 components per vertex (x, y only), tightly packed.
    gl.glVertexPointer(2, GL10.GL_FLOAT, 0, vertBuff);
    gl.glDrawElements(GL10.GL_TRIANGLES, pIndex.length, GL10.GL_UNSIGNED_SHORT, pBuff);
    gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
  }
}
这是网站上的Ray课程

public class Ray {

// World-space endpoints of the picking ray. Declaring these fields fixes
// the "cannot resolve symbol" error on this.P0 / this.P1 — the original
// snippet assigned them without ever declaring them.
public float[] P0; // ray origin: un-projection of the touch at winz = 0 (near plane)
public float[] P1; // ray end:    un-projection of the touch at winz = 1 (far plane)

/**
 * Builds a picking ray through the touched pixel.
 * Requires gl to be a MatrixTrackingGL wrapper so MatrixGrabber can
 * recover the model-view and projection matrices.
 */
public Ray(GL10 gl, int width, int height, float xTouch, float yTouch) {
    MatrixGrabber matrixGrabber = new MatrixGrabber();
    matrixGrabber.getCurrentState(gl);

    int[] viewport = {0, 0, width, height};

    // NOTE: the original reference code labelled these the wrong way
    // round; they are named here by the winz value actually passed.
    float[] farPoint = new float[3];  // winz = 1
    float[] nearPoint = new float[3]; // winz = 0
    float[] temp = new float[4];
    float[] temp2 = new float[4];

    // GL window coordinates are bottom-left origin; touch coordinates are
    // top-left origin, so flip y.
    float winx = xTouch, winy = (float) viewport[3] - yTouch;

    // Un-project the touch at the far clipping plane (winz = 1).
    int result = GLU.gluUnProject(winx, winy, 1.0f, matrixGrabber.mModelView, 0,
            matrixGrabber.mProjection, 0, viewport, 0, temp, 0);
    // NOTE(review): multiplying by the model-view again was kept from the
    // original reference implementation — verify this matches your
    // coordinate-space expectations before relying on it.
    Matrix.multiplyMV(temp2, 0, matrixGrabber.mModelView, 0, temp, 0);
    if (result == GL10.GL_TRUE) {
        // gluUnProject yields homogeneous coordinates; divide by w.
        farPoint[0] = temp2[0] / temp2[3];
        farPoint[1] = temp2[1] / temp2[3];
        farPoint[2] = temp2[2] / temp2[3];
    }

    // Un-project the touch at the near clipping plane (winz = 0).
    result = GLU.gluUnProject(winx, winy, 0, matrixGrabber.mModelView, 0,
            matrixGrabber.mProjection, 0, viewport, 0, temp, 0);
    Matrix.multiplyMV(temp2, 0, matrixGrabber.mModelView, 0, temp, 0);
    if (result == GL10.GL_TRUE) {
        nearPoint[0] = temp2[0] / temp2[3];
        nearPoint[1] = temp2[1] / temp2[3];
        nearPoint[2] = temp2[2] / temp2[3];
    }

    // Same assignment as the original: the ray runs near -> far, so
    // Triangle's direction vector (P1 - P0) points away from the camera.
    this.P0 = nearPoint;
    this.P1 = farPoint;
  }

}
/**
 * Minimal 3D vector math on float[3] arrays (indices X/Y/Z).
 * All methods are pure: arguments are never modified.
 */
public class Vector {

public static final int X = 0;
public static final int Y = 1;
public static final int Z = 2;

/** Dot product u . v. */
public static float dot(float[] u, float[] v) {
    return (u[X] * v[X]) + (u[Y] * v[Y]) + (u[Z] * v[Z]);
}

/** Component-wise difference u - v. */
public static float[] minus(float[] u, float[] v) {
    return new float[]{u[X] - v[X], u[Y] - v[Y], u[Z] - v[Z]};
}

/** Component-wise sum u + v. */
public static float[] addition(float[] u, float[] v) {
    return new float[]{u[X] + v[X], u[Y] + v[Y], u[Z] + v[Z]};
}

/** Scales u by the scalar r. */
public static float[] scalarProduct(float r, float[] u) {
    return new float[]{u[X] * r, u[Y] * r, u[Z] * r};
}

/** Cross product u x v (right-handed). */
public static float[] crossProduct(float[] u, float[] v) {
    return new float[]{
            (u[Y] * v[Z]) - (u[Z] * v[Y]),
            (u[Z] * v[X]) - (u[X] * v[Z]),
            (u[X] * v[Y]) - (u[Y] * v[X])
    };
}

/**
 * Euclidean length (magnitude) of u. Math.sqrt of a non-negative sum is
 * already non-negative, so the redundant Math.abs from the original is
 * removed — same result, one less call.
 */
public static float length(float[] u) {
    return (float) Math.sqrt((u[X] * u[X]) + (u[Y] * u[Y]) + (u[Z] * u[Z]));
}
}
this.P0 = farCoOrds; 和 this.P1 = nearCoOrds; 这两行下面有一条红线,提示(cannot resolve symbol,无法解析符号)

这是网站上的三角形课程:

// A 3D triangle (vertices V0/V1/V2 as float[3]) plus a static ray/triangle
// intersection test, taken from the referenced ray-picking tutorial.
public class Triangle {
public float[] V0;
public float[] V1;
public float[] V2;

public Triangle(float[] V0, float[] V1, float[] V2){
    this.V0 =V0;
    this.V1 = V1;
    this.V2 = V2;
}


private static final float SMALL_NUM =  0.00000001f; // anything that avoids division overflow


// intersectRayAndTriangle(): intersect a ray with a 3D triangle
//    Input:  a ray R, and a triangle T
//    Output: *I = intersection point (when it exists); I must be a float[3]
//            supplied by the caller (written even when the point lies
//            outside the triangle but on its plane — check the return value)
//    Return: -1 = triangle is degenerate (a segment or point)
//             0 = disjoint (no intersect)
//             1 = intersect in unique point I1
//             2 = are in the same plane
public static int intersectRayAndTriangle(Ray R, Triangle T, float[] I)
{
    float[]    u, v, n;             // triangle vectors
    float[]    dir, w0, w;          // ray vectors
    float     r, a, b;             // params to calc ray-plane intersect

    // get triangle edge vectors and plane normal
    u =  Vector.minus(T.V1, T.V0);
    v =  Vector.minus(T.V2, T.V0);
    n =  Vector.crossProduct(u, v);             // cross product

    // Exact float comparison against the zero vector: only catches truly
    // collinear edges, not nearly-degenerate triangles.
    if (Arrays.equals(n, new float[]{0.0f, 0.0f, 0.0f})){           // triangle is degenerate
        return -1;                 // do not deal with this case
    }
    dir =  Vector.minus(R.P1, R.P0);             // ray direction vector
    w0 = Vector.minus( R.P0 , T.V0);
    a = - Vector.dot(n,w0);
    b =  Vector.dot(n,dir);
    if (Math.abs(b) < SMALL_NUM) {     // ray is parallel to triangle plane
        if (a == 0){                // ray lies in triangle plane
            return 2;
        }else{
            return 0;             // ray disjoint from plane
        }
    }

    // get intersect point of ray with triangle plane
    r = a / b;
    if (r < 0.0f){                   // ray goes away from triangle
        return 0;                  // => no intersect
    }
    // for a segment, also test if (r > 1.0) => no intersect

    // I = R.P0 + r * dir : the point where the ray meets the plane.
    float[] tempI =  Vector.addition(R.P0,  Vector.scalarProduct(r, dir));             
   //   intersect point of ray and plane
    I[0] = tempI[0];
    I[1] = tempI[1];
    I[2] = tempI[2];

    // is I inside T?
    // Express (I - V0) in the non-orthogonal basis (u, v) via the
    // parametric-coordinate formulas; I is inside the triangle iff
    // s >= 0, t >= 0 and s + t <= 1.
    float    uu, uv, vv, wu, wv, D;
    uu =  Vector.dot(u,u);
    uv =  Vector.dot(u,v);
    vv =  Vector.dot(v,v);
    w =  Vector.minus(I, T.V0);
    wu =  Vector.dot(w,u);
    wv = Vector.dot(w,v);
    D = (uv * uv) - (uu * vv);      // non-zero because the triangle is non-degenerate

    // get and test parametric coords
    float s, t;
    s = ((uv * wv) - (vv * wu)) / D;
    if (s < 0.0f || s > 1.0f)        // I is outside T
        return 0;
    t = (uv * wu - uu * wv) / D;
    if (t < 0.0f || (s + t) > 1.0f)  // I is outside T
        return 0;

    return 1;                      // I is in T
   }


}

就像我说的,我正在尝试获得一个工作示例,并了解如何实现这段代码。

我在 android 中开发了一个类似的方法。我有一个应用程序,它在 GLSurfaceView 上显示了许多 3D 球体,并使用光线拾取来查找被点击的球体

这是我使用的代码。希望这对你有帮助

private void findTouchedSphere()
    {
        float[] view = new float[3];

        float[] cameraLookAt = new float[] { ... }; // the x/y/z of the point the camera is looking at
        float[] cameraPosition = new float[] { ... ; // the x/y/z of the position of the camera

        view[0] = cameraLookAt[0] - cameraPosition[0];
        view[1] = cameraLookAt[1] - cameraPosition[1];
        view[2] = cameraLookAt[2] - cameraPosition[2];

        normalizeVector(view); // make sure the view vector is a unit vector (|view| = 1)

        float[] h = new float[3];
        float[] cameraUp = new float[] {  ... }; // the up vector passed in GLU.gluLookAt

        h[0] = view[1] * cameraUp[2] - view[2] * cameraUp[1];
        h[1] = view[2] * cameraUp[0] - view[0] * cameraUp[2];
        h[2] = view[0] * cameraUp[1] - view[1] * cameraUp[0];
        normalizeVector(h);

        float[] v = new float[3];

        v[0] = h[1] * view[2] - h[2] * view[1];
        v[1] = h[2] * view[0] - h[0] * view[2];
        v[2] = h[0] * view[1] - h[1] * view[0];
        normalizeVector(v);

        float radians = 45f * (float)Math.PI / 180f; // 45f is the degree I pass in GLU.gluPerspective method

        //minCamDistance is the minimum camera distance I pass in GLU.gluPerspective
        float vLength = (float)Math.tan(radians / 2f) * minCamDistance;
        float hLength = vLength * mScreenWidth / mScreenHeight;

        v[0] *= vLength;
        v[1] *= vLength;
        v[2] *= vLength;

        h[0] *= hLength;
        h[1] *= hLength;
        h[2] *= hLength;

        // mTouchX, mTouchY are the screen coordinates of the touch
        // mScreenWidth, mScreenHeight are the screen dimensions
        float x = mTouchX - mScreenWidth / 2f;
        float y = mScreenHeight / 2f - mTouchY;

        x /= mScreenWidth / 2f;
        y /= mScreenHeight / 2f;

        float[] pos = new float[3];

        pos[0] = cameraPosition[0] + view[0] * minCamDistance + h[0] * x + v[0] * y;
        pos[1] = cameraPosition[1] + view[1] * minCamDistance + h[1] * x + v[1] * y;
        pos[2] = cameraPosition[2] + view[2] * minCamDistance + h[2] * x + v[2] * y;

        float[] dir = new float[3];
        dir[0] = pos[0] - cameraPosition[0];
        dir[1] = pos[1] - cameraPosition[1];
        dir[2] = pos[2] - cameraPosition[2];    

        float scalar;

        float[] startingPos = new float[] { cameraPosition[0], cameraPosition[1], cameraPosition[2]};
        float[] currentPosition = new float[3];
        float distance;

        for (Sphere sphere : mSpheres)
        {
            scalar = (sphere.getZ() - cameraPosition[2]) / dir[2];
            currentPosition[0] = cameraPosition[0] + scalar * dir[0];
            currentPosition[1] = cameraPosition[1] + scalar * dir[1];

            distance = (float)Math.sqrt((currentPosition[0] - sphere.getPosition().x) * (currentPosition[0] - sphere.getPosition().x) +
                    (currentPosition[1] - sphere.getPosition().y) * (currentPosition[1] - sphere.getPosition().y));

            if (distance <= 6) // 6 is the sphere size
            {
                performSomeActionOnHitSphere(sphere);
                return;
            }

        }
    }
/**
 * Scales the given float[3] in place so that |vector| = 1.
 * A zero-length vector is left unchanged: the original divided by zero
 * here, silently turning every component into NaN and poisoning all
 * later picking math.
 */
private void normalizeVector(float[] vector)
    {
        float length = (float) Math.sqrt(vector[0] * vector[0] + vector[1] * vector[1] + vector[2] * vector[2]);
        if (length == 0f) {
            return; // nothing sensible to do; avoid NaN propagation
        }
        vector[0] /= length;
        vector[1] /= length;
        vector[2] /= length;
    }