Python cv2 使用最小化窗口捕获视频

我有一个小 Python 脚本，使用 cv2 捕获检测到的第一张脸，并仅在 cv2 窗口中显示该区域。一切都很好。当前，窗口最小化时视频馈送会冻结。如果把 cv2 窗口最小化到托盘，如何让脚本继续捕获视频？

编辑：我还想知道是否有更好的方法来减少 CPU 负载。当前运行此脚本会占用我 14–20% 的 CPU。（标签：python, opencv, face-recognition, imutils）
from __future__ import division
from imutils.video import VideoStream
import face_recognition
import imutils
import cv2
# Rolling window of recent face boxes, shared across calls so consecutive
# frames can be averaged to damp detection jitter (single-face use only).
POINTS = []

def landmarkTrackSmoothing(box, factor, maxPoints=30):
    """Smooth and upscale the first detected face box.

    ``face_recognition.face_locations`` returns boxes ordered as
    ``(top, right, bottom, left)``; the previous version labeled these
    indices bottom/left/right, which was misleading (values were still
    processed in the correct positional order). Coordinates come from a
    downscaled frame, so dividing by ``factor`` (small width / full
    width, < 1) maps them back to full resolution.

    Args:
        box: list of detected face boxes; only ``box[0]`` is used.
        factor: downscale ratio (detection width / frame width).
        maxPoints: size of the rolling-average window.

    Returns:
        ``[top, right, bottom, left]`` as ints, averaged over the window
        and upscaled to full-frame coordinates.
    """
    top, right, bottom, left = box[0][0], box[0][1], box[0][2], box[0][3]
    # Keep at most maxPoints entries: drop the oldest before appending.
    # (The old code also assigned maxPoints = len(POINTS) here, which was
    # a dead store and has been removed.)
    if len(POINTS) >= maxPoints:
        del POINTS[0]
    POINTS.append([top, right, bottom, left])
    # Column-wise mean over the window, upscaled back to frame coordinates.
    return [int((sum(col) / len(col)) / factor) for col in zip(*POINTS)]
def cartoonFilter(roi):
    """Apply a cartoon effect to a BGR image region.

    Combines an edge mask (adaptive threshold on a median-blurred
    grayscale copy) with a bilateral-filtered color image, so flattened
    color regions are outlined by dark edges.

    Args:
        roi: BGR image region (OpenCV captures are BGR, not RGB).

    Returns:
        BGR image of the same shape with the cartoon effect applied.
    """
    # 1) Edge mask. BUG FIX: frames from a cv2 capture are BGR-ordered,
    # so convert with COLOR_BGR2GRAY — the previous COLOR_RGB2GRAY
    # swapped the red/blue channel weights in the luminance computation.
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    edges = cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
    # 2) Color: bilateral filter flattens regions while preserving edges.
    color = cv2.bilateralFilter(roi, 9, 300, 300)
    # 3) Composite: keep color only where the edge mask is white.
    return cv2.bitwise_and(color, color, mask=edges)
def OpenCamera():
    """Capture from the default camera and display the first detected face.

    Runs until 'q' is pressed; 'c' toggles the cartoon filter. Face
    detection runs on a downscaled grayscale frame to reduce CPU load,
    and the detected box is smoothed over recent frames before cropping.
    """
    vs = VideoStream(0 + cv2.CAP_DSHOW, framerate=120).start()
    vs.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    vs.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
    roi = [0, 0, 0, 0]
    prev = [0, 0, 0, 0]    # last good crop as (top, bottom, left, right)
    # Filter flag, toggled by the 'c' key.
    cartoonEffect = False
    # Create and size the window ONCE. The original re-created and
    # re-sized it on every frame, wasting CPU and forcing 512x512.
    cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Frame", 512, 512)
    # loop over frames from the threaded video stream
    while True:
        frame = vs.read()
        # Downscale and convert to grayscale for fast detection.
        # BUG FIX: the original resized `frame` here, silently discarding
        # the grayscale conversion on the previous line; resize `gray`.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = imutils.resize(gray, width=240)
        # Ratio small/full width (< 1); dividing by it upscales boxes.
        factor = float(gray.shape[1]) / frame.shape[1]
        # Detect (top, right, bottom, left) boxes for each face.
        boxes = face_recognition.face_locations(gray)
        box = list(map(list, boxes))
        # Smooth the first box over recent frames and upscale it.
        for i in range(len(box)):
            box = [landmarkTrackSmoothing(box, factor)]
        if len(box) > 0:
            i = 0
            for (top, right, bottom, left) in box:
                # Crop only the first face; remember it for dropout frames.
                if i == 0:
                    roi = frame[top:bottom, left:right]
                    prev = top, bottom, left, right
                    if cartoonEffect:
                        roi = cartoonFilter(roi)
                i += 1
        else:
            # No face this frame: reuse the last known crop if available,
            # otherwise show the whole frame.
            if prev[0] > 0:
                roi = frame[prev[0]:prev[1], prev[2]:prev[3]]
            else:
                roi = frame
        # Skip display when the crop is empty/degenerate.
        if roi.any():
            cv2.imshow("Frame", roi)
        # continue looping until quit; 'c' toggles the cartoon filter
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        if key == ord('c'):
            cartoonEffect = not cartoonEffect
    # do a bit of cleanup on quit
    cv2.destroyAllWindows()
    vs.stop()
# Begin capturing from the default camera; blocks until 'q' is pressed.
# NOTE(review): this runs on import — consider an `if __name__ == "__main__":`
# guard if the module is ever imported elsewhere.
OpenCamera()
来自未来进口部的
从imutils.video导入视频流
导入人脸识别
导入imutils
进口cv2
点数=[]
def landmarkTrackSmoothing(框,因子,最大点=30):
顶部=方框[0][0]
底部=框[0][1]
左=方框[0][2]
右=方框[0][3]
如果len(点)0):
i=0
对于(顶部、右侧、底部、左侧)输入框:
#从面坐标抓取帧
如果(i==0):
roi=帧[顶部:底部,左侧:右侧]
prev=顶部、底部、左侧、右侧
如果卡通效果:
roi=卡通过滤器(roi)
i+=1
#检查是否要显示输出帧
#屏幕
如果(len(box)==0):
如果(上一个[0]>0):
roi=帧[prev[0]:prev[1]、prev[2]:prev[3]]
其他:
roi=帧
cv2.namedWindow(“框架”,cv2.WINDOW_正常)
如果(roi.any()):
cv2.imshow(“帧”,roi)
cv2.调整窗口大小(“帧”,512,512)
#继续循环直到退出:可展开以添加过滤器的动态键命令
key=cv2.waitKey(1)和0xFF
如果键==ord(“q”):
打破
如果键==ord('c'):
如果卡通效果:
cartoneffect=假
其他:
卡通效果=真
#在退出时做一些清理
cv2.destroyAllWindows()
vs.stop()
#开始捕捉
OpenCamera()
请发布一个最小的可复制代码示例-发布您用于捕获视频的代码(最小代码,没有检测到人脸等并发症)。尝试发布每个人都可以执行的代码示例。代码示例应重现问题:“最小化时视频馈送将冻结”。