Python: projecting depth onto the original mesh

Tags: python, transformation, blender, depth

I am trying to take a Blender-rendered depth map of an object and then transform it so that it overlays the original object. So far I have no problem rendering the object and extracting the depth into its image position.

However, I am stuck when trying to place the reconstructed points back at the object's original location.

I am applying the inverse of the camera's world matrix to the rendered point cloud (blue). Unfortunately, when I apply said camera inverse, the cloud does not end up where I expect it (red).

I have attached the full code I used to reproduce this behaviour. I would be grateful if someone could point me to the correct matrix to multiply the point cloud by.
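
In other words, the pipeline is: back-project every pixel (u, v, depth) through the camera intrinsics into a camera-space point, then move that point into world space with some 4x4 camera matrix, and it is that last matrix I cannot get right. A minimal sketch of the back-projection step (the helper name is mine; fx, fy, cx, cy come from the K matrix computed in the script below):

    import numpy as np

    def back_project(u, v, depth, fx, fy, cx, cy):
        # inverse of the pinhole projection: a pixel plus its depth
        # yields a 3D point in the camera's own frame
        return np.array([(u - cx) * depth / fx,
                         (v - cy) * depth / fy,
                         depth])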

from mathutils import Vector, Quaternion, Euler, Matrix
import numpy as np
import bpy


def main_script():
    clear_scene()
    prepare_views()

    tmp_path = "/tmp/tmp_render.exr"
    scene = get_scene("Scene")
    camera = create_camera("Camera")
    camera.rotation_euler = Euler([np.pi * 0.5, 0, np.pi * 0.5], "XYZ")
    camera.location = Vector([4.5, 0, 1])

    bpy.ops.mesh.primitive_monkey_add(
        location=(0, 0, 1), rotation=(0, 0, np.pi*0.5), size=1.0)

    _w, _h = 640, 480

    update_scene()
    init_rendering(scene, camera, width=640, height=480)
    update_scene()

    matrix_K = get_calibration_matrix_K_from_blender(scene, camera.data)
    _fy, _fx = matrix_K[0][0], matrix_K[1][1]
    _cy, _cx = matrix_K[0][2], matrix_K[1][2]

    scene.render.filepath = tmp_path
    bpy.ops.render.render(write_still=True)
    depth = read_exr(tmp_path, "R")["R"]
    depth = np.reshape(convert_to_numpy(depth), [_h, _w])
    exr_cloud = depth_to_cloud(
        _w, _h, _fx, _fy, _cx, _cy, depth)
    exr_cloud = np.reshape(exr_cloud, [-1, 3])
    exr_cloud = exr_cloud[(exr_cloud[..., 2] < 100) & (exr_cloud[..., 2] > 0)]

    matrix = np.reshape(camera.matrix_world, [4, 4])
    matrix = np.linalg.inv(matrix) # why doesn't this place the depth properly

    vertices = np.ones([exr_cloud.shape[0], 4], dtype=np.float32)
    vertices[:, 0:3] = exr_cloud
    vertices = np.array(
        [matrix @ vertex for vertex in vertices], dtype=np.float32)
    vertices = vertices[..., :3]

    create_mesh("Suzanne_EXR", exr_cloud, [])
    create_mesh("SuzanneT_EXR", vertices, [])

"""
    utilities methods required to run the script
"""

def clear_scene():
    for scene in bpy.data.scenes:
        for obj in scene.objects:
            bpy.context.collection.objects.unlink(obj)

def read_exr(path, channels):
    import OpenEXR as _OpenEXR
    import Imath as _Imath

    file = _OpenEXR.InputFile(path)

    FLOAT = _Imath.PixelType(_Imath.PixelType.FLOAT)

    # InputFile.channel() returns one channel's pixel data as raw packed
    # float32 bytes; convert_to_numpy() decodes them afterwards
    results = {}
    for ch in channels:
        results[ch] = file.channel(ch, FLOAT)

    file.close()

    return results


def convert_to_numpy(data):
    import array as _array
    return np.array(_array.array("f", data).tolist())


def update_scene():
    dg = bpy.context.evaluated_depsgraph_get()
    dg.update()


def prepare_views():
    preferences = bpy.context.preferences

    preferences.view.show_tooltips_python = True
    preferences.view.show_developer_ui = True
    # keep bpy.ops.render.render() from opening a render window
    preferences.view.render_display_type = "NONE"


def init_rendering(scene, camera, width=640, height=480):
    def set_rendering_settings(camera, scene, width=640, height=480):
        image_settings = scene.render.image_settings
        image_settings.file_format = "OPEN_EXR"
        image_settings.use_zbuffer = True

        scene.render.resolution_x = width
        scene.render.resolution_y = height
        # scene.render.use_antialiasing = False

    scene.use_nodes = True
    scene.camera = camera
    node_tree = scene.node_tree
    nodes = node_tree.nodes

    node_render_layers = nodes["Render Layers"]
    node_composite = nodes["Composite"]

    node_tree.links.clear()
    # route the Z pass into the Composite output so depth is written into
    # the EXR's colour channels ("R" is read back by read_exr)
    node_tree.links.new(
        node_render_layers.outputs["Depth"], node_composite.inputs["Image"])

    set_rendering_settings(camera, scene, width, height)


def get_scene(name): return bpy.data.scenes[name]


def create_camera(name):
    camera = bpy.data.cameras.new(name)
    camera.lens = 50

    obj = bpy.data.objects.new(name, camera)
    bpy.context.collection.objects.link(obj)

    return obj

# ---------------------------------------------------------------
# 3x4 P matrix from Blender camera
# ---------------------------------------------------------------

# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model


def get_calibration_matrix_K_from_blender(scene, camera):
    from mathutils import Matrix as _Matrix
    f_in_mm = camera.lens
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camera.sensor_width
    sensor_height_in_mm = camera.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camera.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    # Parameters of intrinsic calibration matrix K
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px * scale / 2
    v_0 = resolution_y_in_px * scale / 2
    skew = 0  # only use rectangular pixels

    K = _Matrix(
        ((alpha_u,    skew, u_0),
         (      0, alpha_v, v_0),
         (      0,       0,   1)))
    return K
    return K
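
# As a sanity check (assuming the camera keeps Blender's default
# 36 mm x 24 mm sensor and 'AUTO' sensor fit, which this script never
# changes), the settings used above give:
#   alpha_u = 50 * 640 / 36 ~ 888.9
#   alpha_v = 50 * 480 / 24 = 1000.0
#   u_0 = 320.0, v_0 = 240.0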


def create_mesh(name, vertices, faces):
    import bmesh as _bmesh
    mesh = bpy.data.meshes.new("Mesh_%s" % name)
    mesh.from_pydata(vertices, [], faces)
    mesh.update()

    obj = bpy.data.objects.new(name, mesh)

    bpy.context.collection.objects.link(obj)

    # round-trip through bmesh; for a vertex-only point cloud this simply
    # rebuilds the same mesh data
    bm = _bmesh.new()
    bm.from_mesh(mesh)

    bm.to_mesh(mesh)
    bm.free()

    return obj


def depth_to_cloud(w, h, fx, fy, cx, cy, depth):
    from numpy import concatenate as _concat
    from numpy import indices as _indices
    from numpy import newaxis as _newaxis

    indices = _indices(depth.shape)

    indices_y, indices_x = indices

    ys, xs, zs = \
        (indices_y - cy) * depth / fy, \
        (indices_x - cx) * depth / fx, \
        depth

    points = _concat([xs[..., _newaxis], ys[..., _newaxis],
                      zs[..., _newaxis]], axis=2)

    return points
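
# e.g. a pixel near the right image border at u = 639 with depth 2.0 and
# the intrinsics above (fx ~ 888.9, cx = 320) back-projects to
# x = (639 - 320) * 2.0 / 888.9 ~ 0.72 in the camera's frame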


if __name__ == "__main__":
    raise main_script()
    matrix_cam = np.reshape(camera.matrix_world, [4, 4])
    mat_scale = np.array(Matrix.Scale(-1, 4))

    matrix = matrix_cam @ mat_scale

    vertices = np.ones([exr_cloud.shape[0], 4], dtype=np.float32)
    vertices[:, 0:3] = exr_cloud
    vertices = np.array(
        [matrix @ vertex for vertex in vertices], dtype=np.float32)
    vertices = vertices[..., :3]
    ys, xs, zs = \
        (indices_y - cx) * depth / fx, \
        (indices_x - cy) * depth / fy, \
        depth