Python PyQt显示来自opencv的视频流

Python PyQt 显示来自 OpenCV 的视频流(python / opencv / pyqt5 / QPixmap)。问题:获取图像的函数只执行一次,标签不会更新;若直接放入 while 循环又会阻塞主窗口。解决方案:使用 QThread 在后台线程抓取帧,并通过携带 QImage 的信号来更新标签。

尝试链接PyQt和Opencv视频源,无法理解如何应用while循环来连续播放视频。它只是拍一张静止的照片。请任何人帮助解决这个问题

  • PyQt = 5

  • Python=3.6.1



问题是获取图像的函数只执行一次,而不更新标签。
正确的方法是将其放置在一个循环中,但这将导致阻塞主窗口。通过使用
QThread
类并通过信号
QImage
来更新标签,可以解决主窗口的阻塞问题。例如:

import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
class Thread(QThread):
    """Worker thread: continuously grabs webcam frames with OpenCV and
    emits each one as a QImage, so the GUI thread is never blocked."""

    # Signal carrying the next frame; connected to a slot in the GUI thread.
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)
class App(QWidget):
    """Main window: displays the video frames emitted by the Thread worker."""

    def __init__(self):
        super().__init__()
        # [...] (window title / geometry attributes elided in the original answer)
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        # Runs in the GUI thread; converts the emitted frame into a pixmap.
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        """Build the UI and start the background capture thread."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()

谢谢 Taimur Islam 的提问。谢谢你的精彩回答,我已经修改了你的代码:我使用的是 PyQt 4 和 Python 2.7,并且没有使用 OpenCV。

import sys    
import numpy as np
import flycapture2 as fc2

from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)

class Thread(QThread):
    """Worker thread that pulls frames from a FlyCapture2 camera and emits
    them as QImage via the ``changePixmap`` signal."""

    # Emitted once per captured frame; connect a GUI-thread slot to it.
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)       
        # Configure and start the camera before the thread's run loop begins.
        self.cameraSettings()


    def run(self):      
        # Endless grab loop; runs until the thread/process is torn down.
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)    

            # Format_Indexed8: single-channel 8-bit image.
            # NOTE(review): no bytesPerLine is passed — assumes the row
            # stride equals the width; confirm for this camera's modes.
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)

            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        """Connect to the first FlyCapture2 camera, log its settings,
        and start capture. Call order follows the flycapture2 API."""
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)    
        # Connect to camera index 0 (the first one found).
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        # Write the current frame-rate property back (presumably to apply
        # defaults explicitly — verify against the flycapture2 docs).
        self.c.set_property(**p)
        self.c.start_capture()


class App(QWidget):
    """Main window for the PyQt4 / FlyCapture2 version of the viewer."""

    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        # NOTE(review): these shadow QWidget.width()/height() methods with
        # plain ints; they are only read once below, but renaming would be safer.
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        """Build the UI and start the capture thread."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        # Parenting the thread to the window keeps it alive with the window.
        th = Thread(self)
        # Connect the signal directly; the original lambda wrapper was redundant.
        th.changePixmap.connect(self.setPixMap)
        th.start()

    def setPixMap(self, p):
        """Slot: scale the incoming QImage and show it in the label."""
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)


if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and propagate the event loop's exit code to the shell.
    qt_app = QApplication(sys.argv)
    viewer = App()
    viewer.show()
    sys.exit(qt_app.exec_())

为PySide2和qimage2ndarray更新此

from PySide2.QtCore import *
from PySide2.QtGui import *
import cv2 # OpenCV
import qimage2ndarray # avoids a QImage memory leak; see the gist linked below
import sys # for exiting

# Minimal implementation...

def displayFrame():
    """Grab one frame from the global ``cap``, convert BGR->RGB, and show
    it in the global ``label``. Driven periodically by the QTimer below."""
    ret, frame = cap.read()
    if not ret:
        # No frame available (camera busy/unplugged): keep the previous
        # pixmap instead of crashing in cvtColor on a None frame.
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # array2qimage copies the buffer, sidestepping the QImage memory leak.
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))

app = QApplication([])
window = QWidget()

# Open the default camera and request a small frame size via OpenCV.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# Build the widgets first, then wire up the timer that drives frame grabs.
label = QLabel('No Camera Feed')
quit_button = QPushButton("Quiter")
quit_button.clicked.connect(sys.exit)  # clicking exits the application

main_layout = QVBoxLayout()
main_layout.addWidget(quit_button)
main_layout.addWidget(label)
window.setLayout(main_layout)

# Poll the camera roughly every 60 ms (fires once the event loop starts).
frame_timer = QTimer()
frame_timer.timeout.connect(displayFrame)
frame_timer.start(60)

window.show()
app.exec_()

# See also: https://gist.github.com/bsdnoobz/8464000


您是否介意在此答案上添加必要的
import
语句,以及使其完整和可运行所需的任何其他内容?看起来它需要cv2和PyQt(4/5)
from PySide2.QtCore import *
from PySide2.QtGui import *
import cv2 # OpenCV
import qimage2ndarray # avoids a QImage memory leak; see the gist linked below
import sys # for exiting

# Minimal implementation...

def displayFrame():
    """Grab one frame from the global ``cap``, convert BGR->RGB, and show
    it in the global ``label``. Driven periodically by the QTimer below."""
    ret, frame = cap.read()
    if not ret:
        # No frame available (camera busy/unplugged): keep the previous
        # pixmap instead of crashing in cvtColor on a None frame.
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # array2qimage copies the buffer, sidestepping the QImage memory leak.
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))

app = QApplication([])
window = QWidget()

# Open the default camera and request a small frame size via OpenCV.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# Build the widgets first, then wire up the timer that drives frame grabs.
label = QLabel('No Camera Feed')
quit_button = QPushButton("Quiter")
quit_button.clicked.connect(sys.exit)  # clicking exits the application

main_layout = QVBoxLayout()
main_layout.addWidget(quit_button)
main_layout.addWidget(label)
window.setLayout(main_layout)

# Poll the camera roughly every 60 ms (fires once the event loop starts).
frame_timer = QTimer()
frame_timer.timeout.connect(displayFrame)
frame_timer.start(60)

window.show()
app.exec_()

# See also: https://gist.github.com/bsdnoobz/8464000