Python 2.7: sending a dictionary object to a thread class with a signal

I am using PyQt5 with Python 2.7.

I have a UIWidget class, a PlayStreaming class, and a Thread class.

Once the button in UIWidget is pressed, the dictionary object in PlayStreaming should be sent to the Thread class.

If I remove 'QVariantMap', I can receive the button-click signal, but then I cannot send the data along with it.
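
For reference, a minimal sketch of the two signal declarations in question (the first is what the posted code below currently uses, the second is the 'QVariantMap' form being discussed); only the second names an argument type, so only it can carry a dict in emit():

    # receives the button click, but cannot carry any data
    transferFaceInfosSignal = QtCore.pyqtSignal()

    # declares a dict-compatible argument type, so emit(facedata) can pass the dictionary
    transferFaceInfosSignal = QtCore.pyqtSignal('QVariantMap')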

How can I solve this?

My full code is below:

from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow, QInputDialog
import cv2
import time
import face_recognition.api as face_recognition

class Thread(QtCore.QThread):    
    changePixmap = QtCore.pyqtSignal(QtGui.QImage)
    updateStatus = QtCore.pyqtSignal(str)
    scaled_size = QtCore.QSize(640, 480)          
    curScale=1.0
    facearray=[]
    dim=(640,480) 
    processedImage=[]
    def run(self):
        cap = cv2.VideoCapture(-1)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
        time.sleep(2)
        self.maxHeight=cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.maxScale=self.maxHeight/480.0        
        while True:
            ret, frame = cap.read()                  
            if ret:
                r=1                
                rescaleSize=int(480*self.curScale)                              
                if(frame.shape[0] > 480 and frame.shape[1] > 640):
                    r = rescaleSize / float(frame.shape[0])
                    self.dim = (int(frame.shape[1] * r), rescaleSize)
                    processedImage=cv2.resize(frame, self.dim, fx=0.0, fy=0.0)
                    face_locations = face_recognition.face_locations(processedImage)
                    if(len(face_locations) > 0):
                        self.encodefaces(face_locations)
                else:
                    processedImage=frame.copy()
                    face_locations = face_recognition.face_locations(processedImage)
                    if(len(face_locations) > 0):
                        self.encodefaces(face_locations)
                for face_location in face_locations:  
                    top, right, bottom, left = face_location
                    cv2.rectangle(frame,(int(right/r),int(top/r)),(int(left/r),int(bottom/r)),(0,255,0),2)
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QtGui.QImage.Format_RGB888)                
                p = convertToQtFormat.scaled(self.scaled_size, QtCore.Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

    @QtCore.pyqtSlot(QtCore.QSize)
    def scaled(self, scaled_size):
        self.scaled_size = scaled_size 

    @QtCore.pyqtSlot()
    def scaleup(self):
        self.curScale = self.curScale + 0.1
        if self.curScale > self.maxScale:
            self.curScale = self.maxScale
        self.updateStatus.emit('Cur scale:'+str(self.dim))

    @QtCore.pyqtSlot()
    def scaledown(self):
        self.curScale = self.curScale - 0.1
        if self.curScale < 1.0:
            self.curScale = 1.0
        self.updateStatus.emit('Cur scale:'+str(self.dim))

    @QtCore.pyqtSlot('QVariantMap')
    def getfacestorecognize(self, clickedInfos):
        self.facearray.append(clickedInfos)
        print(clickedInfos['x']+' '+clickedInfos['y']+' '+clickedInfos['name'])

    def encodefaces(self, facelocs):
        if(len(self.facearray) > 0):
            for face in self.facearray:
                # scaled_size is a QSize, so use width(); float() avoids Python 2 integer division
                r = self.scaled_size.width() / float(self.dim[0])
                x = int(face['x']) * r
                y = int(face['y']) * r
                #for loc in facelocs:


class PlayStreaming(QtWidgets.QLabel):
    reSize = QtCore.pyqtSignal(QtCore.QSize)
    scaleupSignal = QtCore.pyqtSignal()
    scaledownSignal = QtCore.pyqtSignal()
    transferFaceInfosSignal = QtCore.pyqtSignal()#'QVariantMap'    

    def __init__(self):
        super(PlayStreaming, self).__init__()
        self.initUI()
        self.mousePressEvent = self.showDialog

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        self.label.setPixmap(QtGui.QPixmap.fromImage(image))

    def initUI(self):
        # create a label
        self.label = QtWidgets.QLabel(self)        
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.updateStatus.connect(self.handle_status_message)
        self.scaleupSignal.connect(th.scaleup)
        self.scaledownSignal.connect(th.scaledown)
        self.transferFaceInfosSignal.connect(th.getfacestorecognize)
        self.reSize.connect(th.scaled)
        th.start()
        lay = QtWidgets.QVBoxLayout(self)
        lay.addWidget(self.label, alignment=QtCore.Qt.AlignCenter)

    def resizeEvent(self, event):
        self.reSize.emit(self.size())

    def showDialog(self, event): 
        x = event.pos().x()
        y = event.pos().y()
        facedata={"x": str(x),  "y": str(y),  "name": ''}
        text, ok = QInputDialog.getText(self, 'Name input dialog', 
            'Enter name:')        
        if (ok and str(text)!=''):
            facedata['name']=str(text)
            self.transferFaceInfosSignal.emit(facedata)

    def handle_status_message(self, message):
        self.window().set_status_message(message)    


class UIWidget(QtWidgets.QWidget):    
    def __init__(self, parent=None):
        super(UIWidget, self).__init__(parent)        
        # Initialize tab screen
        self.tabs = QtWidgets.QTabWidget()
        self.tab1 = QtWidgets.QWidget()
        self.tab2 = QtWidgets.QWidget()
        self.tab3 = QtWidgets.QWidget()

        # Add tabs
        self.tabs.addTab(self.tab1, "Face")
        self.tabs.addTab(self.tab2, "Human")
        self.tabs.addTab(self.tab3, "Vehicle")

        self.display = PlayStreaming()
        # Create first tab
        self.createGridLayout()
        self.tab1.layout = QtWidgets.QVBoxLayout()
        self.tab1.layout.addWidget(self.display, stretch=1)
        self.tab1.layout.addWidget(self.horizontalGroupBox)
        self.tab1.setLayout(self.tab1.layout)

        # Add tabs to widget
        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(self.tabs)

    def createGridLayout(self):
        self.horizontalGroupBox = QtWidgets.QGroupBox("")
        self.horizontalGroupBox.setStyleSheet("QGroupBox{ background-color: red; border: none;}")  
        hlay1 = QtWidgets.QHBoxLayout()
        self.TestButton=QtWidgets.QPushButton('Test')
        hlay1.addWidget(self.TestButton) 
        self.RunButton=QtWidgets.QPushButton('Run')
        hlay1.addWidget(self.RunButton) 
        self.ScaleUpButton=QtWidgets.QPushButton('ScaleUp')
        self.ScaleUpButton.clicked.connect(self.display.scaleupSignal)
        hlay1.addWidget(self.ScaleUpButton) 
        self.ScaleDownButton=QtWidgets.QPushButton('ScaleDown')
        self.ScaleDownButton.clicked.connect(self.display.scaledownSignal)
        hlay1.addWidget(self.ScaleDownButton) 
        hlay1.addWidget(QtWidgets.QPushButton('Reset'))

        hlay2 = QtWidgets.QHBoxLayout()
        hlay2.addWidget(QtWidgets.QPushButton('Set Faces')) 
        hlay2.addWidget(QtWidgets.QPushButton('FacePose'))
        hlay2.addWidget(QtWidgets.QPushButton('Gender')) 
        hlay2.addWidget(QtWidgets.QPushButton('Age'))
        self.RecognizeButton=QtWidgets.QPushButton('Recognize')
        self.RecognizeButton.clicked.connect(self.display.transferFaceInfosSignal)
        hlay2.addWidget(self.RecognizeButton)

        layout = QtWidgets.QVBoxLayout()        
        layout.addLayout(hlay1)
        layout.addLayout(hlay2)
        self.horizontalGroupBox.setLayout(layout)

class App(QMainWindow): 
    def __init__(self):
        super(App,self).__init__()
        self.title = 'FaceHumanVehicle'
        self.left = 10
        self.top = 10
        self.width = 1000
        self.height = 800   
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.form_widget = UIWidget(self) 
        self.statusBar().showMessage('') 
        self.setCentralWidget(self.form_widget) 
        self.show()

    def set_status_message(self, message):
        return self.statusBar().showMessage(message) 

if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
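
The fix below declares transferFaceInfosSignal with a 'QVariantMap' argument so the signal can actually carry the dict, stores the clicked coordinates and the entered name in self.facedata, and emits the data from a separate parameterless slot, send_signal, which the Recognize button's clicked signal connects to (clicked itself carries no dictionary to forward):
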
class PlayStreaming(QtWidgets.QLabel):
    reSize = QtCore.pyqtSignal(QtCore.QSize)
    scaleupSignal = QtCore.pyqtSignal()
    scaledownSignal = QtCore.pyqtSignal()
    transferFaceInfosSignal = QtCore.pyqtSignal('QVariantMap') # <--- +++

    def __init__(self):
        super(PlayStreaming, self).__init__()
        self.facedata = {"x": "", "y": "", "name": ""} # <--- +++
        self.initUI()
        # self.mousePressEvent = self.showDialog <--- ---

    # ...

    def mousePressEvent(self, event):
        self.facedata["x"] = str(event.pos().x())
        self.facedata["y"] = str(event.pos().y())
        self.showDialog()
        super(PlayStreaming, self).mousePressEvent(event)

    def showDialog(self): 
        text, ok = QtWidgets.QInputDialog.getText(self, 'Name input dialog', 'Enter name:')        
        if ok and text:
            self.facedata['name']= text

    @QtCore.pyqtSlot()
    def send_signal(self):
        if self.facedata["name"]:
            self.transferFaceInfosSignal.emit(self.facedata)

class UIWidget(QtWidgets.QWidget): 
    # ...
    def createGridLayout(self):
        # ...
        self.RecognizeButton.clicked.connect(self.display.send_signal)
        # ...
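
As a self-contained illustration of the same idea (the names Worker, faceReady and Receiver are illustrative and not from the post), a dict travels from the worker thread to a main-thread slot once the signal is declared with 'QVariantMap'; declaring it as pyqtSignal(dict) should work just as well in PyQt5:

import sys
from PyQt5 import QtCore, QtWidgets

class Worker(QtCore.QThread):
    # naming the argument type is what lets the signal carry a dict payload
    faceReady = QtCore.pyqtSignal('QVariantMap')

    def run(self):
        # emitted from the worker thread; delivered to the GUI thread via a queued connection
        self.faceReady.emit({"x": "10", "y": "20", "name": "alice"})

class Receiver(QtWidgets.QWidget):
    @QtCore.pyqtSlot('QVariantMap')
    def on_face_ready(self, info):
        print(info["x"] + ' ' + info["y"] + ' ' + info["name"])

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    receiver = Receiver()
    worker = Worker(receiver)
    worker.faceReady.connect(receiver.on_face_ready)
    worker.start()
    QtCore.QTimer.singleShot(500, app.quit)  # give the queued signal time to arrive
    sys.exit(app.exec_())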