Python 由于Flask集成的帧到字节导致OpenCV问题
我正在使用以下代码：
# Problem snippet from the question (indentation lost by the page scrape).
# Root cause: after `frame = buffer.tobytes()` below, `frame` is a Python
# bytes object — no longer a numpy image array.
def gen_frames(): # generate frame by frame from camera
while True:
# Capture frame-by-frame
success, frame = camera.read() # read the camera frame
if not success:
break
else:
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') # concat frame one by one and show result
#frame_resized = cv2.resize(frame,None,fx=0.75,fy=0.75)
# BUG (the question's error): `frame` is bytes here, so cvtColor raises
# "TypeError: Expected Ptr<cv::UMat> for argument 'src'".
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
我收到一个错误:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
TypeError: Expected Ptr<cv::UMat> for argument 'src'
我应该如何使其工作?已修复,需要移动一些代码。 现在的解决方案是:
from flask import Flask, render_template, Response
import numpy as np
import cv2
import pickle
import rtsp
import PIL as Image
import threading
import time
import queue
# --- Flask app and face-recognition setup (runs once at import time) ---
app = Flask(__name__)

# RTSP stream URL (credentials/host are placeholders).
url = 'rtsp://user:password@192.168.1.xxx:YYYY/stream0/mobotix.mjpeg'
#camera = cv2.VideoCapture(url) # use 0 for web camera
# for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera

# FIX: raw string. The original 'cascades\data\haarcascade...' contains the
# invalid escape sequences \d and \h, which Python only keeps verbatim while
# emitting a DeprecationWarning (SyntaxWarning on newer versions). The raw
# string has the identical runtime value without the warning.
face_cascade = cv2.CascadeClassifier(r'cascades\data\haarcascade_frontalface_alt2.xml')

recognizer = cv2.face.LBPHFaceRecognizer_create()
# NOTE(review): "trainner.yml" looks like a typo of "trainer.yml", but it must
# match the training script's output filename — confirm before renaming.
recognizer.read("trainner.yml")

labels = {"person_name": 1}  # placeholder; replaced by the pickled mapping below
with open("labels.pickle", 'rb') as f:
    og_labels = pickle.load(f)
# Invert {name: id} -> {id: name} so recognizer output ids map back to names.
labels = {v: k for k, v in og_labels.items()}
class VideoCapture:
    """Wrapper around cv2.VideoCapture that always serves the newest frame.

    A daemon thread drains the capture device continuously and keeps at most
    one (the most recent) frame in an internal queue, so ``read()`` never
    returns a stale frame buffered by a live RTSP stream.
    """

    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        reader_thread = threading.Thread(target=self._reader)
        reader_thread.daemon = True  # don't block interpreter shutdown
        reader_thread.start()

    def _reader(self):
        """Pull frames as fast as they arrive, discarding all but the latest."""
        while True:
            ok, grabbed = self.cap.read()
            if not ok:
                break
            if not self.q.empty():
                try:
                    # Drop the previous, still-unconsumed frame.
                    self.q.get_nowait()
                except queue.Empty:
                    pass
            self.q.put(grabbed)

    def read(self):
        """Block until a frame is available and return it."""
        return self.q.get()
def gen_frames():
    """Yield multipart/x-mixed-replace JPEG chunks from the RTSP stream."""
    cap = VideoCapture(url)
    while True:
        time.sleep(.5)  # simulate time between events
        frame = cap.read()
        # frame_detection draws boxes/labels directly on `frame` in place.
        frame_detection(frame)
        ok, jpeg = cv2.imencode('.jpg', frame)
        payload = jpeg.tobytes()
        # One MJPEG part: boundary, header, JPEG bytes, trailing CRLF.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n')
def frame_detection(frame):
    """Detect faces in *frame*, label recognized ones, and draw boxes.

    Mutates *frame* in place (cv2.putText / cv2.rectangle) and also returns
    it for convenience.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    # Hoisted: the original assigned the same color/stroke twice per face.
    color = (0, 0, 255)  # BGR red, shared by the label text and the box
    stroke = 2
    for (x, y, w, h) in faces:
        print(x, y, w, h)
        roi_gray = gray[y:y+h, x:x+w]
        # Removed unused local `roi_color` (frame[y:y+h, x:x+w]) from the original.
        # LBPH prediction on the grayscale face region.
        id_, conf = recognizer.predict(roi_gray)
        if conf >= 45:  # and conf <= 85:
            print(id_)
            print(labels[id_])
            font = cv2.FONT_HERSHEY_SIMPLEX
            name = labels[id_]
            cv2.putText(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)
            #img_item = "my-image.png"
            #cv2.imwrite(img_item, roi_gray)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, stroke)
    return frame
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    stream = gen_frames()
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
    """Video streaming home page."""
    template = 'index.html'
    return render_template(template)
if __name__ == '__main__':
    # Bind to all interfaces so the stream is reachable from the LAN.
    app.run(host='0.0.0.0')
（此处原为上方代码清单的机器翻译乱码副本，如"从烧瓶导入烧瓶"即 "from flask import Flask" 的逐词误译；正确的完整代码清单见下文。）
from flask import Flask, render_template, Response
import numpy as np
import cv2
import pickle
import rtsp
import PIL as Image
import threading
import time
import queue
# --- Flask app and face-recognition setup (runs once at import time) ---
app = Flask(__name__)

# RTSP stream URL (credentials/host are placeholders).
url = 'rtsp://user:password@192.168.1.xxx:YYYY/stream0/mobotix.mjpeg'
#camera = cv2.VideoCapture(url) # use 0 for web camera
# for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera

# FIX: raw string. The original 'cascades\data\haarcascade...' contains the
# invalid escape sequences \d and \h, which Python only keeps verbatim while
# emitting a DeprecationWarning (SyntaxWarning on newer versions). The raw
# string has the identical runtime value without the warning.
face_cascade = cv2.CascadeClassifier(r'cascades\data\haarcascade_frontalface_alt2.xml')

recognizer = cv2.face.LBPHFaceRecognizer_create()
# NOTE(review): "trainner.yml" looks like a typo of "trainer.yml", but it must
# match the training script's output filename — confirm before renaming.
recognizer.read("trainner.yml")

labels = {"person_name": 1}  # placeholder; replaced by the pickled mapping below
with open("labels.pickle", 'rb') as f:
    og_labels = pickle.load(f)
# Invert {name: id} -> {id: name} so recognizer output ids map back to names.
labels = {v: k for k, v in og_labels.items()}
class VideoCapture:
    """Wrapper around cv2.VideoCapture that always serves the newest frame.

    A daemon thread drains the capture device continuously and keeps at most
    one (the most recent) frame in an internal queue, so ``read()`` never
    returns a stale frame buffered by a live RTSP stream.
    """

    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        reader_thread = threading.Thread(target=self._reader)
        reader_thread.daemon = True  # don't block interpreter shutdown
        reader_thread.start()

    def _reader(self):
        """Pull frames as fast as they arrive, discarding all but the latest."""
        while True:
            ok, grabbed = self.cap.read()
            if not ok:
                break
            if not self.q.empty():
                try:
                    # Drop the previous, still-unconsumed frame.
                    self.q.get_nowait()
                except queue.Empty:
                    pass
            self.q.put(grabbed)

    def read(self):
        """Block until a frame is available and return it."""
        return self.q.get()
def gen_frames():
    """Yield multipart/x-mixed-replace JPEG chunks from the RTSP stream."""
    cap = VideoCapture(url)
    while True:
        time.sleep(.5)  # simulate time between events
        frame = cap.read()
        # frame_detection draws boxes/labels directly on `frame` in place.
        frame_detection(frame)
        ok, jpeg = cv2.imencode('.jpg', frame)
        payload = jpeg.tobytes()
        # One MJPEG part: boundary, header, JPEG bytes, trailing CRLF.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n')
def frame_detection(frame):
    """Detect faces in *frame*, label recognized ones, and draw boxes.

    Mutates *frame* in place (cv2.putText / cv2.rectangle) and also returns
    it for convenience.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    # Hoisted: the original assigned the same color/stroke twice per face.
    color = (0, 0, 255)  # BGR red, shared by the label text and the box
    stroke = 2
    for (x, y, w, h) in faces:
        print(x, y, w, h)
        roi_gray = gray[y:y+h, x:x+w]
        # Removed unused local `roi_color` (frame[y:y+h, x:x+w]) from the original.
        # LBPH prediction on the grayscale face region.
        id_, conf = recognizer.predict(roi_gray)
        if conf >= 45:  # and conf <= 85:
            print(id_)
            print(labels[id_])
            font = cv2.FONT_HERSHEY_SIMPLEX
            name = labels[id_]
            cv2.putText(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)
            #img_item = "my-image.png"
            #cv2.imwrite(img_item, roi_gray)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, stroke)
    return frame
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    stream = gen_frames()
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
    """Video streaming home page."""
    template = 'index.html'
    return render_template(template)
if __name__ == '__main__':
    # Bind to all interfaces so the stream is reachable from the LAN.
    app.run(host='0.0.0.0')