C++ 实时对象绘制
我正在尝试对对象纹理执行实时绘制。现在使用Irrlicht,但这并不重要。到目前为止,我已经使用此算法获得了正确的UV坐标: 找出用户选择的对象的三角形(光线投射) 找出交点在那个三角形上的UV(重心)坐标 找出每个三角形顶点的UV(纹理)坐标 找出交点的UV(纹理)坐标 计算交点的纹理图像坐标 但不知何故,当我在第五步中把点绘制到纹理图像上时,我得到了完全错误的结果。因此,在光标点绘制矩形时,其X(或Z)坐标是反转的: 下面是我用来获取纹理坐标的代码:
core::vector2df getPointUV(core::triangle3df tri, core::vector3df p)
{
    // Maps the collision point p on triangle tri to pixel coordinates on the
    // model's texture 0, draws a 20x20 marker rectangle there (render-to-
    // texture), updates the CursorOnModel global, and returns the pixel
    // coordinates as a vector2df.
    //
    // NOTE(review): tri and p come from the collision manager and are
    // presumably in WORLD coordinates, while the mesh stores vertex
    // positions in MODEL coordinates. The exact-equality vertex lookup
    // below therefore needs the triangle corners transformed back into
    // model space, or it never matches on a transformed node.

    // Barycentric weights of p inside tri: u runs along (C - A), v along
    // (B - A). Barycentric coordinates are affine invariants, so computing
    // them in world space, before the transform below, is safe.
    core::vector3df
        v0 = tri.pointC - tri.pointA,
        v1 = tri.pointB - tri.pointA,
        v2 = p - tri.pointA;
    float dot00 = v0.dotProduct(v0),
        dot01 = v0.dotProduct(v1),
        dot02 = v0.dotProduct(v2),
        dot11 = v1.dotProduct(v1),
        dot12 = v1.dotProduct(v2);
    float invDenom = 1.f / ((dot00 * dot11) - (dot01 * dot01)),
        u = (dot11 * dot02 - dot01 * dot12) * invDenom,
        v = (dot00 * dot12 - dot01 * dot02) * invDenom;

    // BUG FIX: bring the world-space triangle corners into model space so
    // they can be compared against the mesh's vertex positions.
    core::matrix4 inverseTransform(
        ((scene::IAnimatedMeshSceneNode*)Model)->getAbsoluteTransformation(),
        core::matrix4::EM4CONST_INVERSE);
    inverseTransform.transformVect(tri.pointA);
    inverseTransform.transformVect(tri.pointB);
    inverseTransform.transformVect(tri.pointC);

    scene::IMesh* m = Mesh->getMesh(((scene::IAnimatedMeshSceneNode*)Model)->getFrameNr());

    // Find one vertex per triangle corner (to read its UVs) and remember
    // the material of the buffer containing the triangle.
    core::array<video::S3DVertex> VA, VB, VC;
    video::SMaterial Material;
    for (unsigned int i = 0; i < m->getMeshBufferCount(); i++)
    {
        scene::IMeshBuffer* mb = m->getMeshBuffer(i);
        video::S3DVertex* vertices = (video::S3DVertex*) mb->getVertices();
        // BUG FIX: loop index renamed from 'v', which shadowed the
        // barycentric weight 'v' used after the loops.
        for (unsigned int vi = 0; vi < mb->getVertexCount(); vi++)
        {
            if (vertices[vi].Pos == tri.pointA)
                VA.push_back(vertices[vi]);
            else if (vertices[vi].Pos == tri.pointB)
                VB.push_back(vertices[vi]);
            else if (vertices[vi].Pos == tri.pointC)
                VC.push_back(vertices[vi]);
            if (vertices[vi].Pos == tri.pointA || vertices[vi].Pos == tri.pointB || vertices[vi].Pos == tri.pointC)
                Material = mb->getMaterial();
            if (VA.size() > 0 && VB.size() > 0 && VC.size() > 0)
                break;
        }
        if (VA.size() > 0 && VB.size() > 0 && VC.size() > 0)
            break;
    }

    // ROBUSTNESS: the original indexed VA[0]/VB[0]/VC[0] unconditionally;
    // bail out instead of reading out of bounds when no vertex matched.
    if (VA.size() == 0 || VB.size() == 0 || VC.size() == 0)
        return core::vector2df(0.f, 0.f);

    core::vector2df
        A = VA[0].TCoords,
        B = VB[0].TCoords,
        C = VC[0].TCoords;
    // Interpolate the UVs with the same weights: u along (C - A), v along
    // (B - A), matching v0/v1 above.
    core::vector2df P(A + (u * (C - A)) + (v * (B - A)));

    video::ITexture* tex = Material.getTexture(0);
    if (!tex)                                   // ROBUSTNESS: material may have no texture
        return core::vector2df(0.f, 0.f);
    core::dimension2du Size = tex->getSize();
    int X = (int)(Size.Width * P.X), Y = (int)(Size.Height * P.Y);
    CursorOnModel = core::vector2di(X, Y);

    // Draw the marker rectangle via render-to-texture.
    // BUG FIX: the original wrapped this in lock(true)/unlock(). Locking a
    // texture for CPU access while drawing to it on the GPU is
    // contradictory, and the returned pixel pointer was ignored; removed.
    Device->getVideoDriver()->setRenderTarget(tex, true, true, 0);
    Device->getVideoDriver()->draw2DRectangle(video::SColor(255, 0, 100, 75),
        core::rect<s32>((X - 10), (Y - 10), (X + 10), (Y + 10)));
    Device->getVideoDriver()->setRenderTarget(0, true, true, 0);

    return core::vector2df((float)X, (float)Y);
}
core::vector2df getPointUV(core::triangle3df tri, core::vector3df p)
{
core::vector3df
v0 = tri.pointC - tri.pointA,
v1 = tri.pointB - tri.pointA,
v2 = p - tri.pointA;
float dot00 = v0.dotProduct(v0),
dot01 = v0.dotProduct(v1),
dot02 = v0.dotProduct(v2),
dot11 = v1.dotProduct(v1),
dot12 = v1.dotProduct(v2);
float invDenom = 1.f / ((dot00 * dot11) - (dot01 * dot01)),
u = (dot11 * dot02 - dot01 * dot12) * invDenom,
v = (dot00 * dot12 - dot01 * dot02) * invDenom;
scene::IMesh* m = Mesh->getMesh(((scene::IAnimatedMeshSceneNode*)Model)->getFrameNr());
core::array<video::S3DVertex> VA, VB, VC;
video::SMaterial Material;
for (unsigned int i = 0; i < m->getMeshBufferCount(); i++)
{
scene::IMeshBuffer* mb = m->getMeshBuffer(i);
video::S3DVertex* vertices = (video::S3DVertex*) mb->getVertices();
for (unsigned long long v = 0; v < mb->getVertexCount(); v++)
{
if (vertices[v].Pos == tri.pointA)
VA.push_back(vertices[v]); else
if (vertices[v].Pos == tri.pointB)
VB.push_back(vertices[v]); else
if (vertices[v].Pos == tri.pointC)
VC.push_back(vertices[v]);
if (vertices[v].Pos == tri.pointA || vertices[v].Pos == tri.pointB || vertices[v].Pos == tri.pointC)
Material = mb->getMaterial();
if (VA.size() > 0 && VB.size() > 0 && VC.size() > 0)
break;
}
if (VA.size() > 0 && VB.size() > 0 && VC.size() > 0)
break;
}
core::vector2df
A = VA[0].TCoords,
B = VB[0].TCoords,
C = VC[0].TCoords;
core::vector2df P(A + (u * (C - A)) + (v * (B - A)));
core::dimension2du Size = Material.getTexture(0)->getSize();
CursorOnModel = core::vector2di(Size.Width * P.X, Size.Height * P.Y);
int X = Size.Width * P.X, Y = Size.Height * P.Y;
// DRAWING SOME RECTANGLE
Material.getTexture(0)->lock(true);
Device->getVideoDriver()->setRenderTarget(Material.getTexture(0), true, true, 0);
Device->getVideoDriver()->draw2DRectangle(video::SColor(255, 0, 100, 75), core::rect<s32>((X - 10), (Y - 10),
(X + 10), (Y + 10)));
Device->getVideoDriver()->setRenderTarget(0, true, true, 0);
Material.getTexture(0)->unlock();
return core::vector2df(X, Y);
}
我只是想让我的物体可以实时绘制。我目前的问题是:纹理坐标计算错误和非唯一顶点UV坐标(因此,在矮人斧头的一侧绘制某个东西会在斧头的另一侧绘制相同的东西)
我该怎么做呢?我可以使用您的代码库并让它为我工作 关于第二个问题“非唯一顶点UV坐标”: 嗯,你是绝对正确的,你需要唯一的顶点UV来实现这一点,这意味着你必须展开(unwrap)你的模型,而不是利用共享的uv空间,例如镜像元素和其他东西。(例如,左/右靴子——如果它们使用相同的uv空间,您将在两者上自动绘制,其中一个为红色,另一个为绿色)。您可以看看“uvlayout”(工具)或3ds max中的uv展开修改器 关于第一个也是更重要的问题:“纹理坐标计算错误”: barycentric(重心)坐标的计算是正确的,但我认为您的输入数据是错误的。我假设您使用irrlicht的CollisionManager和TriangleSelector得到三角形和碰撞点。问题是,三角形顶点的位置(从collisionTest中获得的返回值)在世界坐标(WorldCoordinates)中。但在计算时,需要将它们放在模型坐标中,因此需要执行以下操作: 伪代码:
irr::core::vector2df getPointUV(irr::core::triangle3df tri, irr::core::vector3df p, irr::scene::IMeshSceneNode* pMeshNode, irr::video::IVideoDriver* pDriver)
{
irr::core::matrix4 inverseTransform(
pMeshNode->getAbsoluteTransformation(),
irr::core::matrix4::EM4CONST_INVERSE);
inverseTransform.transformVect(tri.pointA);
inverseTransform.transformVect(tri.pointB);
inverseTransform.transformVect(tri.pointC);
irr::core::vector3df
v0 = tri.pointC - tri.pointA,
v1 = tri.pointB - tri.pointA,
v2 = p - tri.pointA;
float dot00 = v0.dotProduct(v0),
dot01 = v0.dotProduct(v1),
dot02 = v0.dotProduct(v2),
dot11 = v1.dotProduct(v1),
dot12 = v1.dotProduct(v2);
float invDenom = 1.f / ((dot00 * dot11) - (dot01 * dot01)),
u = (dot11 * dot02 - dot01 * dot12) * invDenom,
irr::core::vector2df getPointUV(irr::core::triangle3df tri, irr::core::vector3df p, irr::scene::IMeshSceneNode* pMeshNode, irr::video::IVideoDriver* pDriver)
{
    // Maps a world-space collision point p on world-space triangle tri to
    // the interpolated UV (texture) coordinates of pMeshNode's mesh.
    //
    // tri and p come from the collision manager in WORLD coordinates; the
    // mesh stores vertex positions in MODEL coordinates, so everything is
    // transformed back into model space before the vertex lookup.
    // pDriver is unused but kept for interface compatibility with callers.
    (void)pDriver;

    irr::core::matrix4 inverseTransform(
        pMeshNode->getAbsoluteTransformation(),
        irr::core::matrix4::EM4CONST_INVERSE);
    inverseTransform.transformVect(tri.pointA);
    inverseTransform.transformVect(tri.pointB);
    inverseTransform.transformVect(tri.pointC);
    // BUG FIX: the hit point must be transformed with the same matrix.
    // The original transformed only the triangle corners, so v2 below
    // mixed model-space and world-space vectors and the barycentric
    // weights were wrong on any transformed node.
    inverseTransform.transformVect(p);

    // Barycentric weights of p inside tri: u along (C - A), v along (B - A).
    irr::core::vector3df
        v0 = tri.pointC - tri.pointA,
        v1 = tri.pointB - tri.pointA,
        v2 = p - tri.pointA;
    float dot00 = v0.dotProduct(v0),
        dot01 = v0.dotProduct(v1),
        dot02 = v0.dotProduct(v2),
        dot11 = v1.dotProduct(v1),
        dot12 = v1.dotProduct(v2);
    float invDenom = 1.f / ((dot00 * dot11) - (dot01 * dot01)),
        u = (dot11 * dot02 - dot01 * dot12) * invDenom,
        v = (dot00 * dot12 - dot01 * dot02) * invDenom;

    // Find the vertex matching each triangle corner to read its UVs.
    // IMPROVED: search every mesh buffer, not only buffer 0, and stop as
    // soon as all three corners are matched. If no match is found a corner
    // keeps its default-constructed UVs (0,0), as in the original.
    irr::video::S3DVertex A, B, C;
    bool foundA = false, foundB = false, foundC = false;
    irr::scene::IMesh* mesh = pMeshNode->getMesh();
    for (irr::u32 b = 0; b < mesh->getMeshBufferCount() && !(foundA && foundB && foundC); ++b)
    {
        irr::scene::IMeshBuffer* mb = mesh->getMeshBuffer(b);
        irr::video::S3DVertex* vertices = static_cast<irr::video::S3DVertex*>(mb->getVertices());
        for (irr::u32 i = 0; i < mb->getVertexCount(); ++i)
        {
            if (vertices[i].Pos == tri.pointA)
            {
                A = vertices[i];
                foundA = true;
            }
            else if (vertices[i].Pos == tri.pointB)
            {
                B = vertices[i];
                foundB = true;
            }
            else if (vertices[i].Pos == tri.pointC)
            {
                C = vertices[i];
                foundC = true;
            }
        }
    }

    // Interpolate UVs with the same weights: t1 pairs with u (C - A),
    // t2 with v (B - A), matching v0/v1 above.
    irr::core::vector2df t2 = B.TCoords - A.TCoords;
    irr::core::vector2df t1 = C.TCoords - A.TCoords;
    irr::core::vector2df uvCoords = A.TCoords + t1 * u + t2 * v;
    return uvCoords;
}