Opencv 如何将pygame图像转换为Open CV图像?
我目前正在使用 Pygame 和 pykinect2 从 Kinect v2 摄像头获取实时 RGB 视频。我想把它转换成一个 OpenCV 图像,以便用于后续的计算。代码如下:
import pykinect2
import pygame
import cv2
import ctypes
from pykinect2 import PyKinectV2
from pykinect2 import PyKinectRuntime
# Kinect v2 runtime subscribed to the color stream only.
kinectcam = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)
def draw_color_frame(frame, target_surface):
    """Copy a raw Kinect color frame into *target_surface*'s pixel buffer.

    The surface must be locked while its raw buffer is written to.
    ``frame`` is the flat numpy byte array returned by
    ``get_last_color_frame()``; the bytes are memcpy'd straight into the
    surface, so the surface must match the color frame's size and depth.
    """
    target_surface.lock()
    buffer_address = kinectcam.surface_as_array(target_surface.get_buffer())
    # Raw memory copy: frame bytes -> surface pixel buffer.
    ctypes.memmove(buffer_address, frame.ctypes.data, frame.size)
    del buffer_address  # release the buffer reference before unlocking
    target_surface.unlock()
# --- pygame display loop: show the live Kinect color stream ---------------
pygame.init()

# Off-screen surface sized to the Kinect color frame (32-bit pixels).
frame_surface = pygame.Surface(
    (kinectcam.color_frame_desc.Width, kinectcam.color_frame_desc.Height), 0, 32)

pygame.display.set_caption("Kinect View")
infoObject = pygame.display.Info()
# Resizable window at half the desktop resolution.
screen = pygame.display.set_mode(
    (infoObject.current_w >> 1, infoObject.current_h >> 1),
    pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)
clock = pygame.time.Clock()  # single clock; the original created two

done = False
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.VIDEORESIZE:  # window resized
            screen = pygame.display.set_mode(
                event.dict['size'],
                pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

    if kinectcam.has_new_color_frame():
        frame = kinectcam.get_last_color_frame()
        draw_color_frame(frame, frame_surface)
        frame = None

    # Scale the frame to the current window width, preserving aspect ratio.
    h_to_w = float(frame_surface.get_height()) / frame_surface.get_width()
    target_height = int(h_to_w * screen.get_width())
    surface_to_draw = pygame.transform.scale(
        frame_surface, (screen.get_width(), target_height))
    screen.blit(surface_to_draw, (0, 0))
    surface_to_draw = None
    # flip() presents the double buffer; the extra update() call was redundant.
    pygame.display.flip()
    clock.tick(60)  # cap at 60 FPS

pygame.quit()
kinectcam.close()
我假设您想转换的是您正在 blit 显示的那幅图像(surface_to_draw)。要将 pygame.Surface 对象转换为 OpenCV 图像,请执行以下操作:
# Snapshot the surface pixels into a (width, height, 3) RGB ndarray.
surface_pixels = pygame.surfarray.array3d(surface_to_draw)
# pygame's array is column-major; swap axes to OpenCV's (height, width, channel).
surface_pixels = surface_pixels.transpose([1, 0, 2])
# OpenCV works in BGR channel order, so reorder the channels last.
img_bgr = cv2.cvtColor(surface_pixels, cv2.COLOR_RGB2BGR)
更新:我还假设您的 pygame 图像是彩色图像。我假设您想转换的是您正在 blit 显示的那幅图像(surface_to_draw)。要将 pygame.Surface 对象转换为 OpenCV 图像,请执行以下操作:
# Snapshot the surface pixels into a (width, height, 3) RGB ndarray.
surface_pixels = pygame.surfarray.array3d(surface_to_draw)
# pygame's array is column-major; swap axes to OpenCV's (height, width, channel).
surface_pixels = surface_pixels.transpose([1, 0, 2])
# OpenCV works in BGR channel order, so reorder the channels last.
img_bgr = cv2.cvtColor(surface_pixels, cv2.COLOR_RGB2BGR)
更新:我还假设您的 pygame 图像是彩色图像。使用 PyKinect2 库,您可以创建一个 acquisitionKinect.py,其中定义 Kinect 帧处理所需的各种方法和属性;然后从主脚本(Run.py)调用它。其中的 get_color_frame() 执行转换,转换后的彩色帧随后被显示到 OpenCV 窗口中,如下所示。注意:我的回答假设您不想使用 pygame 库,而是使用 OpenCV。 Run.py
import cv2
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from acquisitionKinect import AcquisitionKinect
from frame import Frame

if __name__ == '__main__':
    kinect = AcquisitionKinect()
    frame = Frame()

    while True:
        # get_frame() polls the runtime and pulls a new color frame when one
        # is available, so a second unconditional get_color_frame() call is
        # unnecessary (and would run even before any frame has arrived).
        kinect.get_frame(frame)
        image = kinect._frameRGB

        # Guard BEFORE converting: _frameRGB is None until the first frame
        # arrives, and cv2.cvtColor(None, ...) raises.
        if image is not None:
            # OpenCV works in BGR; the Kinect delivers a 4-channel frame,
            # so drop the alpha channel before display.
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
            cv2.imshow("Output-Keypoints", image)
        # A single waitKey both services the window and polls the keyboard;
        # the original called it twice per iteration.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
class Frame():
    """Plain data container for one acquisition: filled in by
    AcquisitionKinect.get_frame()."""

    # Image planes (None until the corresponding stream delivers data).
    frameRGB = None
    frameDepth = None
    frameDepthQuantized = None
    # Body-tracking results.
    frameSkeleton = None
    # Running frame counter.
    frame_num = 0
    # Derived shoulder orientation (Euler angles / quaternion).
    shoulder_orientation_euler = None
    shoulder_orientation_quat = None
acquisitionKinect.py
import ctypes
import _ctypes
import sys

# Python 3 renamed the low-level ``thread`` module to ``_thread``;
# keep the Python 2 name available either way.
if sys.hexversion >= 0x03000000:
    import _thread as thread
else:
    import thread
class AcquisitionKinect():
    """Acquires color (and optionally depth/body) frames from a Kinect v2.

    NOTE(review): relies on module-level imports of ``numpy as np``,
    ``cv2``, ``time`` and ``pykinect2`` being present in the hosting module.
    """

    def __init__(self, resolution_mode=1.0):
        # Color frames are downscaled by 1/resolution_mode in get_color_frame().
        self.resolution_mode = resolution_mode
        self._done = False

        # Kinect runtime object; subscribe to color, body and depth streams.
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color |
            PyKinectV2.FrameSourceTypes_Body |
            PyKinectV2.FrameSourceTypes_Depth)

        # Skeleton data and derived joint arrays.
        self._bodies = None
        self.body_tracked = False
        self.joint_points = np.array([])
        self.joint_points3D = np.array([])
        self.joint_points_RGB = np.array([])
        self.joint_state = np.array([])

        # Most recently acquired frames (None until the first frame arrives).
        self._frameRGB = None
        self._frameDepth = None
        self._frameDepthQuantized = None
        self._frameSkeleton = None
        self.frameNum = 0

    def get_frame(self, frame):
        """Acquire the newest available data and copy it into *frame*."""
        self.acquireFrame()
        frame.ts = int(round(time.time() * 1000))  # millisecond timestamp
        self.frameNum += 1
        frame.frameRGB = self._frameRGB
        frame.frameDepth = self._frameDepth
        frame.frameDepthQuantized = self._frameDepthQuantized
        frame.frameSkeleton = self._frameSkeleton

    def get_color_frame(self):
        """Fetch the last color frame as an (H, W, channels) uint8 image."""
        self._frameRGB = self._kinect.get_last_color_frame()
        # Use the sensor-reported frame size instead of hard-coding 1080x1920.
        desc = self._kinect.color_frame_desc
        self._frameRGB = self._frameRGB.reshape(
            (desc.Height, desc.Width, -1)).astype(np.uint8)
        self._frameRGB = cv2.resize(
            self._frameRGB, (0, 0),
            fx=1 / self.resolution_mode, fy=1 / self.resolution_mode)

    def acquireFrame(self):
        """Poll the runtime and pull whichever frame types have new data."""
        if self._kinect.has_new_color_frame():
            self.get_color_frame()

    def close(self):
        """Release the Kinect runtime and drop cached frames."""
        self._kinect.close()
        self._frameDepth = None
        self._frameRGB = None
        self._frameSkeleton = None
Frame.py
import cv2
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from acquisitionKinect import AcquisitionKinect
from frame import Frame

if __name__ == '__main__':
    kinect = AcquisitionKinect()
    frame = Frame()

    while True:
        # get_frame() polls the runtime and pulls a new color frame when one
        # is available, so a second unconditional get_color_frame() call is
        # unnecessary (and would run even before any frame has arrived).
        kinect.get_frame(frame)
        image = kinect._frameRGB

        # Guard BEFORE converting: _frameRGB is None until the first frame
        # arrives, and cv2.cvtColor(None, ...) raises.
        if image is not None:
            # OpenCV works in BGR; the Kinect delivers a 4-channel frame,
            # so drop the alpha channel before display.
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
            cv2.imshow("Output-Keypoints", image)
        # A single waitKey both services the window and polls the keyboard;
        # the original called it twice per iteration.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
class Frame():
    """Plain data container for one acquisition: filled in by
    AcquisitionKinect.get_frame()."""

    # Image planes (None until the corresponding stream delivers data).
    frameRGB = None
    frameDepth = None
    frameDepthQuantized = None
    # Body-tracking results.
    frameSkeleton = None
    # Running frame counter.
    frame_num = 0
    # Derived shoulder orientation (Euler angles / quaternion).
    shoulder_orientation_euler = None
    shoulder_orientation_quat = None
使用PyKinect 2库,您可以创建acquisitionClass.py,它定义Kinect帧处理所需的不同方法和属性。然后从主脚本(Run.py)调用acquisitionClass.py,它包含执行转换的获取颜色框架(),以使用转换后的颜色框架并将其显示到opencv框架,如下所示: 注意:我的回答是假设您不想使用pyGame库,而是使用OpenCV Run.py
import cv2
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from acquisitionKinect import AcquisitionKinect
from frame import Frame

if __name__ == '__main__':
    kinect = AcquisitionKinect()
    frame = Frame()

    while True:
        # get_frame() polls the runtime and pulls a new color frame when one
        # is available, so a second unconditional get_color_frame() call is
        # unnecessary (and would run even before any frame has arrived).
        kinect.get_frame(frame)
        image = kinect._frameRGB

        # Guard BEFORE converting: _frameRGB is None until the first frame
        # arrives, and cv2.cvtColor(None, ...) raises.
        if image is not None:
            # OpenCV works in BGR; the Kinect delivers a 4-channel frame,
            # so drop the alpha channel before display.
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
            cv2.imshow("Output-Keypoints", image)
        # A single waitKey both services the window and polls the keyboard;
        # the original called it twice per iteration.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
class Frame():
    """Plain data container for one acquisition: filled in by
    AcquisitionKinect.get_frame()."""

    # Image planes (None until the corresponding stream delivers data).
    frameRGB = None
    frameDepth = None
    frameDepthQuantized = None
    # Body-tracking results.
    frameSkeleton = None
    # Running frame counter.
    frame_num = 0
    # Derived shoulder orientation (Euler angles / quaternion).
    shoulder_orientation_euler = None
    shoulder_orientation_quat = None
acquisitionKinect.py
import ctypes
import _ctypes
import sys

# Python 3 renamed the low-level ``thread`` module to ``_thread``;
# keep the Python 2 name available either way.
if sys.hexversion >= 0x03000000:
    import _thread as thread
else:
    import thread


class AcquisitionKinect():
    """Acquires color (and optionally depth/body) frames from a Kinect v2.

    NOTE(review): relies on module-level imports of ``numpy as np``,
    ``cv2``, ``time`` and ``pykinect2`` being present in the hosting module.
    """

    def __init__(self, resolution_mode=1.0):
        # Color frames are downscaled by 1/resolution_mode in get_color_frame().
        self.resolution_mode = resolution_mode
        self._done = False

        # Kinect runtime object; subscribe to color, body and depth streams.
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color |
            PyKinectV2.FrameSourceTypes_Body |
            PyKinectV2.FrameSourceTypes_Depth)

        # Skeleton data and derived joint arrays.
        self._bodies = None
        self.body_tracked = False
        self.joint_points = np.array([])
        self.joint_points3D = np.array([])
        self.joint_points_RGB = np.array([])
        self.joint_state = np.array([])

        # Most recently acquired frames (None until the first frame arrives).
        self._frameRGB = None
        self._frameDepth = None
        self._frameDepthQuantized = None
        self._frameSkeleton = None
        self.frameNum = 0

    def get_frame(self, frame):
        """Acquire the newest available data and copy it into *frame*."""
        self.acquireFrame()
        frame.ts = int(round(time.time() * 1000))  # millisecond timestamp
        self.frameNum += 1
        frame.frameRGB = self._frameRGB
        frame.frameDepth = self._frameDepth
        frame.frameDepthQuantized = self._frameDepthQuantized
        frame.frameSkeleton = self._frameSkeleton

    def get_color_frame(self):
        """Fetch the last color frame as an (H, W, channels) uint8 image."""
        self._frameRGB = self._kinect.get_last_color_frame()
        # Use the sensor-reported frame size instead of hard-coding 1080x1920.
        desc = self._kinect.color_frame_desc
        self._frameRGB = self._frameRGB.reshape(
            (desc.Height, desc.Width, -1)).astype(np.uint8)
        self._frameRGB = cv2.resize(
            self._frameRGB, (0, 0),
            fx=1 / self.resolution_mode, fy=1 / self.resolution_mode)

    def acquireFrame(self):
        """Poll the runtime and pull whichever frame types have new data."""
        if self._kinect.has_new_color_frame():
            self.get_color_frame()

    def close(self):
        """Release the Kinect runtime and drop cached frames."""
        self._kinect.close()
        self._frameDepth = None
        self._frameRGB = None
        self._frameSkeleton = None
Frame.py
import cv2
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from acquisitionKinect import AcquisitionKinect
from frame import Frame

if __name__ == '__main__':
    kinect = AcquisitionKinect()
    frame = Frame()

    while True:
        # get_frame() polls the runtime and pulls a new color frame when one
        # is available, so a second unconditional get_color_frame() call is
        # unnecessary (and would run even before any frame has arrived).
        kinect.get_frame(frame)
        image = kinect._frameRGB

        # Guard BEFORE converting: _frameRGB is None until the first frame
        # arrives, and cv2.cvtColor(None, ...) raises.
        if image is not None:
            # OpenCV works in BGR; the Kinect delivers a 4-channel frame,
            # so drop the alpha channel before display.
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
            cv2.imshow("Output-Keypoints", image)
        # A single waitKey both services the window and polls the keyboard;
        # the original called it twice per iteration.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
class Frame():
    """Plain data container for one acquisition: filled in by
    AcquisitionKinect.get_frame()."""

    # Image planes (None until the corresponding stream delivers data).
    frameRGB = None
    frameDepth = None
    frameDepthQuantized = None
    # Body-tracking results.
    frameSkeleton = None
    # Running frame counter.
    frame_num = 0
    # Derived shoulder orientation (Euler angles / quaternion).
    shoulder_orientation_euler = None
    shoulder_orientation_quat = None
如何修改上述代码以获取深度帧?它应该可以像彩色帧一样被调用;acquisitionKinect.py 的完整实现见该 GitHub 链接。我修改了 Run.py 程序以获取深度帧,但在屏幕上看不到它,也无法找出哪里出了问题,请提供帮助。