Python 从OpenCV到PyQt获取网络摄像头镜头
我正在尝试使用opencv从摄像头获取网络摄像头数据,然后在PyQt gui中显示这些数据。我以前通过使用.after函数访问Tkinter主窗口循环来使用Tkinter实现这一点。然而,PyQt似乎没有相同的可用性,为了让另一个循环与应用程序一起运行,您需要使用单独的线程。这就是我想到的:
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget().__init__()
self.camera = None
self.camera = cv2.VideoCapture(0)
b, self.frame = self.camera.read()
self.label = QtGui.QLabel()
self.workThread = WorkThread(self)
self.connect(self.workThread, QtCore.SIGNAL('update_Camera'), self.draw)
self.workThread.start()
def closeEvent(self, event):
self.workThread.stop()
def draw(self):
print "I should Redraw"
height, width, channel = self.frame.shape
bpl = 3 * width
self.qImg = QImage(self.frame.data, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(self.qImg)
self.label.setPixmap(pix)
self.label.show()
class WorkThread(QtCore.QThread):
    """Worker thread that repeatedly emits 'update_Camera' until stopped."""

    def __init__(self, parent):
        QtCore.QThread.__init__(self)
        self.parent = parent
        self.running = True  # cleared by stop() to let run() return

    def __del__(self):
        self.wait()

    def run(self):
        # BUG FIX: the original called self.terminate() inside the loop,
        # forcibly killing the thread right after the first emit — which is
        # why draw() never appeared to fire more than once.
        while self.running:
            self.emit(QtCore.SIGNAL('update_Camera'), "_")

    def stop(self):
        # BUG FIX: VideoCapture.closeEvent() calls workThread.stop(), but the
        # original class had no such method (AttributeError). End the loop
        # cooperatively and wait for run() to finish.
        self.running = False
        self.wait()
# Entry point: build the Qt application, draw the first frame, run the loop.
application = QtGui.QApplication(sys.argv)
widget = VideoCapture()
widget.draw()
sys.exit(application.exec_())
我的想法很简单:我将创建一个带有循环的线程,该循环发出一个信号,通知主应用程序进行更新。(显然,我不想要一个带有while-True循环的线程,但我只是为了方便而使用它,并计划在我能保证这个想法可行时替换它)。但是,由于从未调用draw()函数,因此信号似乎没有注册。知道我做错了什么吗?我对OpenCV一无所知,所以我只能猜测问题所在 我猜你只读取了一次视频数据。如果是视频流,则必须不断读取和解释数据
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
update_video = QtCore.pyqtSignal()
def __init__(self, parent = None):
QtGui.QWidget().__init__()
self.camera = cv2.VideoCapture(0)
self.label = QtGui.QLabel()
layout = QtGui.QHBoxLayout()
self.setLayout(layout)
layout.addWidget(self.label)
# Create the worker Thread
self.workThread = WorkThread(self.readVideo)
self.update_video.connect(self.draw)
def start(self):
self.workerThread.start()
def stop(self):
self.workThread.alive = False
self.workThread.stop()
def readVideo(self):
"""Note this method is executed in a thread. No drawing can happen in a thread. Emit a signal to draw items."""
b, self.frame = self.camera.read()
self.update_video.emit() # Signals are slow this may happen too fast
def closeEvent(self, event):
self.stop()
return QtGui.QWidget.closeEvent(self, event)
#self.workThread.alive = False
#self.workThread.stop()
def draw(self):
print "I should Redraw"
height, width, channel = self.frame.shape
bpl = 3 * width
qImg = QImage(self.frame.data, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(qImg)
self.label.setPixmap(pix)
# self.label.show() # The label is now a part of the widget layout
class WorkThread(QtCore.QThread):
    """Generic worker thread that calls *target* in a loop until stopped.

    Mirrors the threading.Thread constructor signature.
    """

    def __init__(self, target=None, args=(), kwargs=None):
        QtCore.QThread.__init__(self)
        # I don't know how Qt's threads work, so I am treating it like a python thread
        self.target = target
        self.args = args
        # BUG FIX: the original used a mutable default (kwargs={}), which is
        # shared across every instance of the class.
        self.kwargs = {} if kwargs is None else kwargs
        self.alive = True  # cleared to make run() return

    def stop(self):
        # Added because callers invoke workThread.stop(); QThread itself has
        # no such method. Ends the loop cooperatively.
        self.alive = False
        self.wait()

    def run(self):
        # BUG FIX: with the default target=None the original loop raised
        # TypeError ('NoneType' object is not callable); bail out instead.
        if self.target is None:
            return
        while self.alive:
            self.target(*self.args, **self.kwargs)
# Entry point: create the widget, start its worker thread, run the event loop.
application = QtGui.QApplication(sys.argv)
capture = VideoCapture()
capture.start()
sys.exit(application.exec_())
由于每秒只更新这么多次,因此可能需要使用计时器,而不是线程。计时器可能更容易使用,也更安全
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget().__init__()
self.camera = cv2.VideoCapture(0)
self.label = QtGui.QLabel()
layout = QtGui.QHBoxLayout()
self.setLayout(layout)
layout.addWidget(self.label)
# Create the worker Thread
self.timer= QtCore.QTimer()
self.timer.setInterval(300)
self.timer.timeout.connect(self.draw_camera)
def start(self):
self.timer.start()
def stop(self):
self.timer.stop()
def draw_camera(self):
"""You can draw in a timer, so just read the data and draw however fast you want."""
print "I should Redraw"
b, frame = self.camera.read()
height, width, channel = frame.shape
bpl = 3 * width
qImg = QImage(frame.data, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(qImg)
self.label.setPixmap(pix)
def closeEvent(self, event):
self.stop()
return QtGui.QWidget.closeEvent(self, event)
# Entry point: start the timer-driven capture widget and run the event loop.
application = QtGui.QApplication(sys.argv)
capture = VideoCapture()
capture.start()
sys.exit(application.exec_())
我一直在做与你的问题非常相似的事情。我修改了你的代码并在我的Windows PC上进行了测试 这里的关键点是,您必须将cv2 camera对象放入
工作线程
,在run()方法中读取main while循环中的每个帧,最后将图像发送到QWidget对象以显示它。通过这种方式,您可以获得图像捕获和显示的连续迭代
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
def __init__(self, parent = None):
# Use super() to call __init__() methods in the parent classes
super(VideoCapture, self).__init__()
# The instantiated QLabel object should belong to the 'self' QWidget object
self.label = QtGui.QLabel(self) # <- So put 'self' in the parenthesis
# Set the QLabel geometry to fit the image dimension (640, 480)
# The top left corner (0, 0) is the position within the QWidget main window
self.label.setGeometry(0,0,640,480)
# Instantiate a QThread object. No need to pass in the parent QWidget object.
self.workThread = WorkThread()
# Connect signal from self.workThread to the slot self.draw
self.connect(self.workThread, QtCore.SIGNAL('update_Camera'), self.draw)
self.workThread.start()
def closeEvent(self, event):
self.workThread.stop()
event.accept()
def draw(self, img):
print "I should Redraw"
height, width, channel = img.shape
bpl = 3 * width
self.qImg = QImage(img, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(self.qImg)
self.label.setPixmap(pix)
self.label.show()
class WorkThread(QtCore.QThread):
    """Owns the cv2 camera and streams every captured frame to the GUI
    through the old-style 'update_Camera' signal."""

    def __init__(self):
        # Properly initialize the QThread base class.
        super(WorkThread, self).__init__()
        # The capture device lives in the worker, not in the widget.
        self.camera = cv2.VideoCapture(0)
        # Loop flag polled by run(); stop() clears it.
        self.running = True

    def stop(self):
        # Ends the capture loop cooperatively.
        self.running = False

    def run(self):
        # Continuously grab frames and hand each one to the QWidget.
        while self.running:
            grabbed, self.frame = self.camera.read()
            self.emit(QtCore.SIGNAL('update_Camera'), self.frame)
# Entry point: show the widget; its worker thread is started in __init__.
application = QtGui.QApplication(sys.argv)
viewer = VideoCapture()
viewer.show()
sys.exit(application.exec_())
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
    def __init__(self, parent=None):
        # 使用super()调用父类中的__init__()方法
        super(VideoCapture, self).__init__()
        # 实例化的QLabel对象应该属于'self' QWidget对象
        self.label = QtGui.QLabel(self)

现在评论这个已经太迟了。我想知道为什么定时器在这种情况下比线程更好,而且更安全?这是否也减少了内存使用?在我的例子中,我想为我的屏幕播放视频显示一个使用上述技术的cam窗口,因此,我不想在后期制作中这样做,因为这需要更多的精力和时间,我希望在我录制屏幕时直接包括它。用定时器做这个好吗?我的意思是相当长的时间运行应用程序。更新!我最终使用@linyc74建议的工作线程。计时器有时在第一次打开时会出现故障。这是很久以前的事了。我想我更喜欢定时器,因为我有其他重要的代码运行,这是一个优先事项。有时python线程是贪婪的,如果主线程没有等待IO,就不会释放到主线程。相机是一个不错的功能,但不是我的应用程序的主要用途。