C++ 超环面与相机碰撞 (c++, opengl, 3d, collision-detection, glut)
我想实现在游戏区域随机放置的6个圆环体的碰撞。这是一个简单的三维空间游戏,使用透视投影和第一人称视角。我看到一些 Stack Overflow 的答案,建议计算玩家到圆环体单元的距离,若小于单元大小的一半则判定为碰撞(并做 +/- 坐标系和地图拓扑调整)。但是如果我们只取这个距离,这意味着我们只考虑 z 坐标,那么只要相机移动到这个距离之内(不考虑 x、y 坐标),就总是会被判定为碰撞,这是错误的,对吗?我希望使用 AABB 算法来实现这一点:是将相机位置和环面位置视为两个方框来检查碰撞(盒子对盒子),还是将相机作为点、环面作为盒子(点对盒子)?可以吗?或者有人能建议最好的方法吗?下面是我迄今为止尝试过的代码。
// Camera state shared with ChangeSize() below:
//   im   = modelview matrix read back from GL (inverse of the camera transform)
//   m    = camera matrix, i.e. inverse(im)
//   fovx = field of view in radians.
// NOTE(review): ChangeSize() passes 45 deg to gluPerspective as the VERTICAL
// fov, and uses zfar = 300 there while this global says 100 — confirm which
// values are intended; the frustum-corner math below must agree with them.
float im[16], m[16], znear = 0.1, zfar = 100.0, fovx = 45.0 * M_PI / 180.0;
glm::vec3 p0, p1, p2, p3, o, u, v; // o: near-plane centre; u/v: camera right/up axes scaled to half-extents
//p0, p1, p2, p3 holds your znear camera screen corners in world coordinates
// Window-resize handler: rebuilds the projection matrix and caches the
// camera's znear-plane rectangle corners (globals p0..p3, world space) so
// they can be used later for frustum/collision tests.
//   w, h - new window size in pixels (h is clamped to 1 to avoid div-by-zero)
void ChangeSize(int w, int h)
{
GLfloat fAspect;
// Prevent a divide by zero
if(h == 0)
h = 1;
// Set Viewport to window dimensions
glViewport(0, 0, w, h);
// Calculate aspect ratio of the window
fAspect = (GLfloat)w*1.0/(GLfloat)h;
// Set the perspective coordinate system
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Vertical field of view of 45 degrees; the 0.1 near plane must stay equal
// to the global `znear` used below or the rectangle lands on the wrong plane.
// NOTE(review): zfar here (300) disagrees with the global zfar (100).
gluPerspective(45.0f, fAspect, 0.1f, 300.0f); //may need to make larger depending on project
// Modelview matrix reset
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// get camera matrix (must be read here, before any model transformations)
glGetFloatv(GL_MODELVIEW_MATRIX, im); // modelview holds the camera inverse matrix
matrix_inv(m, im); // m = inverse(im) = camera matrix
u = glm::vec3(m[0], m[1], m[2]); // x axis (camera right)
v = glm::vec3(m[4], m[5], m[6]); // y axis (camera up)
o = glm::vec3(m[12], m[13], m[14]); // origin (eye position)
o -= glm::vec3(m[8], m[9], m[10]) * znear; // step from the eye onto the near plane
// BUGFIX: gluPerspective above takes a VERTICAL fov of 45 deg, so the
// near-plane half-height is znear*tan(fov/2) and the half-width is that
// times the aspect ratio. The previous scaling (u by tan(0.5*fovx),
// v by tan(0.5*fovx/fAspect)) only matches a projection created as
// gluPerspective(fovx/aspect, ...) and gave a wrong rectangle for
// any aspect != 1. (fovx == 45 deg in radians, matching the call above.)
u *= znear * tan(0.5 * fovx) * fAspect; // half-width
v *= znear * tan(0.5 * fovx);           // half-height
// get rectangle corners
p0 = o - u - v;
p1 = o + u - v;
p2 = o + u + v;
p3 = o - u + v;
}
// Inverts a rigid-body 4x4 transform stored column-major (OpenGL layout):
// valid only while the upper-left 3x3 block is orthonormal, so that
// inverse(R) == transpose(R). Writes the result into a[16].
// Safe even when a and b point at the same array (reads happen before
// the corresponding writes).
void matrix_inv(float* a, float* b) // a[16] = Inverse(b[16])
{
float t;
// rotation block: transpose — the diagonal entries stay put
a[0] = b[0];
a[5] = b[5];
a[10] = b[10];
// the three off-diagonal pairs trade places via a temporary
t = b[4]; a[4] = b[1]; a[1] = t;
t = b[8]; a[8] = b[2]; a[2] = t;
t = b[9]; a[9] = b[6]; a[6] = t;
// bottom row (projection part) is carried over unchanged
a[3] = b[3];
a[7] = b[7];
a[11] = b[11];
a[15] = b[15];
// translation: new_pos = -transpose(R) * old_pos
// (capture the old origin first so the formula also works when a == b)
float px = b[12], py = b[13], pz = b[14];
a[12] = -((a[0] * px) + (a[4] * py) + (a[8] * pz));
a[13] = -((a[1] * px) + (a[5] * py) + (a[9] * pz));
a[14] = -((a[2] * px) + (a[6] * py) + (a[10] * pz));
}
//Store torus coordinates: {x, y, z, display-flag}; flag 1 = still visible
std::vector<std::vector<GLfloat>> translateTorus = { { 0.0, 1.0, -10.0, 1 }, { 0.0, 4.0, -6.0, 1 } , { -1.0, 0.0, -4.0, 1 },
{ 3.0, 1.0, -6.0, 1 }, { 1.0, -1.0, -9.0, 1 } , { 4.0, 1.0, -4.0, 1 } };
GLfloat xpos, ypos, zpos, flagToDisplayCrystal;
//Looping through 6 Torus
for (size_t i = 0; i < translateTorus.size(); i++) {
//Get the torus coordinates
xpos = translateTorus[i][0];
ypos = translateTorus[i][1];
zpos = translateTorus[i][2];
//This variable will work as a variable to display crystal after collision
flagToDisplayCrystal = translateTorus[i][3];
//p0 is the min corner, p2 the max corner of the camera's near rectangle
//Half-extent of the torus AABB (glutSolidTorus outer radius is 1.0)
double halfside = 1.0 / 2;
//Per-axis separation distances: a POSITIVE value means the two boxes
//cannot intersect on that axis
float d1x = p0[0] - (xpos + halfside); // camera min beyond torus max (x)
float d1y = p0[1] - (ypos + halfside); // camera min beyond torus max (y)
float d2x = (xpos - halfside) - p2[0]; // torus min beyond camera max (x)
float d2y = (ypos - halfside) - p2[1]; // torus min beyond camera max (y)
//BUGFIX: the original condition fired when the boxes were SEPARATED
//(any d? > 0) and additionally required exact float equality on z
//(p0[2] == zpos), which almost never holds. AABBs collide when NO axis
//separates them; for z we test interval overlap with the torus extent.
float camZmin = (p0[2] < p2[2]) ? p0[2] : p2[2];
float camZmax = (p0[2] < p2[2]) ? p2[2] : p0[2];
bool xyOverlap = d1x <= 0.0f && d1y <= 0.0f && d2x <= 0.0f && d2y <= 0.0f;
bool zOverlap = (zpos - halfside) <= camZmax && camZmin <= (zpos + halfside);
if (xyOverlap && zOverlap) {
//Collision: mark the crystal as collected so it is no longer drawn
translateTorus[i][3] = 0;
}
else {
if (flagToDisplayCrystal == 1) {
glPushMatrix();
glEnable(GL_TEXTURE_2D);
glTranslatef(xpos, ypos, zpos);
glRotatef(fPlanetRot, 0.0f, -1.0f, 0.0f);
glColor3f(0.0, 0.0, 0.0);
// Select the texture object
glBindTexture(GL_TEXTURE_2D, textures[3]);
glutSolidTorus(0.1, 1.0, 30, 30);
glDisable(GL_TEXTURE_2D);
glPopMatrix();
}
}
}
float im[16], m[16], znear = 0.1, zfar = 100.0, fovx = 45.0 * M_PI / 180.0;
glm::vec3 p0, p1, p2, p3, o, u, v;
// p0, p1, p2, p3 在世界坐标系中保存 znear 相机屏幕四角
void ChangeSize(int w, int h)
{
GLfloat fAspect;
// 防止被零除
if (h == 0)
h = 1;
// 将视口设置为窗口尺寸
glViewport(0, 0, w, h);
// 计算窗口的纵横比
fAspect = (GLfloat)w * 1.0 / (GLfloat)h;
// 设置透视坐标系
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// 45 度视野, 近平面和远平面 0.1 和 300
// znear 和 zfar 的比例通常应为 1000:1, 以便 GPU 进行 z 深度排序
gluPerspective(45.0f, fAspect, 0.1f, 300.0f); // 可能需要根据项目进行放大
// 模型视图矩阵重置
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// 获取摄影机矩阵 (在模型变换之前, 必须位于代码中的正确位置)
glGetFloatv(GL_MODELVIEW_MATRIX, im); // 获取相机逆矩阵
matrix_inv(m, im); // m = inverse(im)
u = glm::vec3(m[0], m[1], m[2]); // x 轴
v = glm::vec3(m[4], m[5], m[6]); // y 轴
o = glm::vec3(m[12], m[13], m[14]); // 原点
o -= glm::vec3(m[8], m[9], m[10]) * znear; // z 轴偏移
// 按视场缩放
u *= znear * tan(0.5 * fovx);
v *= znear * tan(0.5 * fovx / fAspect);
// 获取矩形四角
p0 = o - u - v;
p1 = o + u - v;
p2 = o + u + v;
p3 = o - u + v;
}
void matrix_inv(float* a, float* b) // a[16] = Inverse(b[16])
{
float x, y, z;
// 旋转矩阵转置
a[0] = b[0];
a[5] = b[5];
a[10] = b[10];
x = b[1]; a[1] = b[4]; a[4] = x;
x = b[2]; a[2] = b[8]; a[8] = x;
x = b[6]; a[6] = b[9]; a[9] = x;
// 复制投影部分
a[3] = b[3];
a[7] = b[7];
a[11] = b[11];
a[15] = b[15];
// 转换原点: new_pos = -new_rotation_matrix * old_pos
x = (a[0] * b[12]) + (a[4] * b[13]) + (a[8] * b[14]);
y = (a[1] * b[12]) + (a[5] * b[13]) + (a[9] * b[14]);
z = (a[2] * b[12]) + (a[6] * b[13]) + (a[10] * b[14]);
a[12] = -x;
a[13] = -y;
a[14] = -z;
}
// 存储圆环坐标
std::vector<std::vector<GLfloat>> translateTorus = { { 0.0, 1.0, -10.0, 1 }, { 0.0, 4.0, -6.0, 1 }, { -1.0, 0.0, -4.0, 1 },
{ 3.0, 1.0, -6.0, 1 }, { 1.0, -1.0, -9.0, 1 }, { 4.0, 1.0, -4.0, 1 } };
GLfloat xpos, ypos, zpos, flagToDisplayCrystal;
// 遍历 6 个圆环
for (int i = 0; i < translateTorus.size(); i++) {
// 获取圆环坐标
xpos = translateTorus[i][0];
ypos = translateTorus[i][1];
zpos = translateTorus[i][2];
// 该变量用于在碰撞后控制水晶的显示
flagToDisplayCrystal = translateTorus[i][3];
// p0 为最小角, p2 为最大角
// 使用圆环坐标和半径构造一个方框
double halfside = 1.0 / 2;
float d1x = p0[0] - (xpos + halfside);
float d1y = p0[1] - (ypos + halfside);
float d2x = (xpos - halfside) - p2[0];
float d2y = (ypos - halfside) - p2[1];
// 此处进行碰撞检测
if ((d1x > 0.0f || d1y > 0.0f || d2x > 0.0f || d2y > 0.0f) && p2[2] == zpos && p0[2] == zpos) {
// 如果发生碰撞, 将标志更新为 0
translateTorus[i][3] = 0;
}
else {
if (flagToDisplayCrystal == 1) {
glPushMatrix();
glEnable(GL_TEXTURE_2D);
glTranslatef(xpos, ypos, zpos);
glRotatef(fPlanetRot, 0.0f, -1.0f, 0.0f);
glColor3f(0.0, 0.0, 0.0);
// 选择纹理对象
glBindTexture(GL_TEXTURE_2D, textures[3]);
glutSolidTorus(0.1, 1.0, 30, 30);
glDisable(GL_TEXTURE_2D);
glPopMatrix();
}
}
}
正如我在评论中提到的,您有两个选项,要么使用OpenGL渲染,要么完全在CPU端进行计算,而不使用它。让我们先从渲染开始:
glColor??(?)
)而不使用照明或明暗处理或其他方式。但是不要交换缓冲区因为这样会在屏幕上显示内容并导致闪烁
glReadPixels
n = normalize(cross(p1-p0,p2-p1)) // is rectangle normal
dq = normalize(q1-q0) // is line direction
q = q0 + dq*dot(dq,p1-p0) // is plane/line intersection
GL_MODELVIEW = Inverse(Camera)*Rendered_Object
// Reference snippet: reconstruct the camera's znear-plane rectangle (p0..p3)
// in world coordinates and draw it as a colored quad for debugging.
// Assumes GL_MODELVIEW currently holds ONLY the inverse camera transform,
// i.e. it is read back before any model transformations are applied.
float aspect=float(xs)/float(ys); // aspect from OpenGL window resolution
float im[16],m[16],znear=0.1,zfar=100.0,fovx=60.0*M_PI/180.0; // horizontal fov in radians
vec3 p0,p1,p2,p3,o,u,v; // 3D vectors
// this is how my perspective is set
// glMatrixMode(GL_PROJECTION);
// glLoadIdentity();
// gluPerspective(fovx*180.0/(M_PI*aspect),aspect,znear,zfar);
// get camera matrix (must be in right place in code before model transformations)
glGetFloatv(GL_MODELVIEW_MATRIX,im); // get camera inverse matrix
matrix_inv(m,im); // m = inverse(im) = camera matrix
u =vec3(m[ 0],m[ 1],m[ 2]); // x axis (camera right)
v =vec3(m[ 4],m[ 5],m[ 6]); // y axis (camera up)
o =vec3(m[12],m[13],m[14]); // origin (eye position)
o-=vec3(m[ 8],m[ 9],m[10])*znear; // z axis offset: step from the eye onto the near plane
// scale by FOV — consistent with the gluPerspective call above, where fovy = fovx/aspect
u*=znear*tan(0.5*fovx);        // half-width of the near rectangle
v*=znear*tan(0.5*fovx/aspect); // half-height of the near rectangle
// get rectangle corners (o is the centre, u/v the half-extents)
p0=o-u-v;
p1=o+u-v;
p2=o+u+v;
p3=o-u+v;
// render it for debug — each corner gets its own color for identification
glColor3f(1.0,1.0,0.0);
glBegin(GL_QUADS);
glColor3f(1.0,0.0,0.0); glVertex3fv(p0.dat); // .dat — presumably the raw float[3] of a custom vec3 type; verify against its definition
glColor3f(0.0,0.0,0.0); glVertex3fv(p1.dat);
glColor3f(0.0,0.0,1.0); glVertex3fv(p2.dat);
glColor3f(1.0,1.0,1.0); glVertex3fv(p3.dat);
glEnd();
// Inverse of a column-major rigid transform (orthonormal 3x3 rotation plus
// translation): inverse(R) == transpose(R), new origin = -(R^T)*old origin.
// Works in place (a == b) because every source value is read before the
// destination slot that overlaps it is written.
void matrix_inv(float *a,float *b) // a[16] = Inverse(b[16])
{
// off-diagonal rotation entries that exchange under transposition
const int loIdx[3] = {1, 2, 6};
const int hiIdx[3] = {4, 8, 9};
// the diagonal of the rotation block is invariant
a[ 0] = b[ 0];
a[ 5] = b[ 5];
a[10] = b[10];
for (int k = 0; k < 3; ++k)
{
const float keep = b[loIdx[k]]; // read both before writing (alias-safe)
a[loIdx[k]] = b[hiIdx[k]];
a[hiIdx[k]] = keep;
}
// projection row is copied verbatim
a[ 3] = b[ 3];
a[ 7] = b[ 7];
a[11] = b[11];
a[15] = b[15];
// origin: new_pos = -(R^T) * old_pos, using the saved old translation
const float ox = b[12], oy = b[13], oz = b[14];
for (int k = 0; k < 3; ++k)
a[12 + k] = -((a[k] * ox) + (a[4 + k] * oy) + (a[8 + k] * oz));
}