Python OpenCV:更新“第一帧”(基准帧)

Python OpenCV:更新“第一帧”(基准帧),python,opencv,opencv-python,Python,Opencv,Opencv Python。我学习了 OpenCV 运动检测教程。我在几个网站上找到的代码大致相同:在一个 while 循环中,捕获第一帧作为基准帧后,将其与后续帧进行比较,找出差异。我想每 15 分钟更新一次第一帧(以适应白天的光线变化),但我不明白为什么在使用“新”的第一帧后,这个过程不再有效。代码见下文。

我学习了opencv运动检测教程。我在几个网站上找到了代码,大致相同:在一个while循环中,在捕获第一帧作为基本帧后,将其与下一帧进行比较,找出差异

我想每15分钟更新一次第一帧(考虑白天的光线条件),但我不明白为什么在使用“新”的第一帧后,这个过程不再有效

# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    # BUG FIX: these two lines must be indented under the `if` — unindented
    # they are a SyntaxError (the `else` below has no matching `if` suite)
    vs = VideoStream(src=0).start()
    time.sleep(2.0)  # give the camera sensor time to warm up
# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])
# initialize the first (base) frame in the video stream
firstFrame = None
# timestamp of when the base frame was captured; used to refresh it periodically
interval = datetime.datetime.now()
# loop over the frames of the video
# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied text
    frame = vs.read()
    # VideoStream.read() returns the frame directly; VideoCapture.read()
    # returns a (grabbed, frame) tuple, so take element [1] in that case
    frame = frame if args.get("video", None) is None else frame[1]

    text = "Unoccupied"
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break
    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # if the base frame is None, (re)capture it now
    if firstFrame is None:
        firstFrame = gray
        # BUG FIX: restart the refresh timer whenever a new base frame is
        # taken — without this, once the first 15 minutes elapse the check
        # at the bottom of the loop clears firstFrame on EVERY iteration,
        # so the frame differencing never runs again
        interval = datetime.datetime.now()
        continue

    # compute the absolute difference between the current frame and
    # the base frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    # dilate the thresholded image to fill in holes, then find contours
    # on the thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue
        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
            (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

    # every 15 minutes, discard the base frame so the next iteration
    # captures a fresh one (to follow daylight / lighting changes)
    if (datetime.datetime.now() - interval) > datetime.timedelta(minutes=15):
        firstFrame = None
# cleanup the camera and close any open windows.
# Use a plain if/else statement: a conditional *expression* evaluated purely
# for its side effects (the original `vs.stop() if ... else vs.release()`)
# is unidiomatic and discards its result.
if args.get("video", None) is None:
    vs.stop()       # webcam: stop the VideoStream thread
else:
    vs.release()    # video file: release the VideoCapture handle
cv2.destroyAllWindows()
# 导入必要的软件包
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2

# 构造参数解析器并解析参数
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="视频文件的路径")
ap.add_argument("-a", "--min-area", type=int, default=500, help="最小区域大小")
args = vars(ap.parse_args())
# 如果视频参数为 None,则从网络摄像头读取
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
# 否则,从视频文件中读取
else:
    vs = cv2.VideoCapture(args["video"])
# 初始化视频流中的第一帧
firstFrame = None
interval = datetime.datetime.now()
# 循环处理视频的每一帧
while True:
    # 抓取当前帧并初始化占用/未占用文本
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Unoccupied"
    # 如果无法抓取帧,说明已到达视频结尾
    if frame is None:
        break
    # 调整帧大小,转换为灰度并进行模糊处理
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # 如果第一帧为 None,则初始化它
    if firstFrame is None:
        firstFrame = gray
        continue
    # 计算当前帧与第一帧之间的绝对差值
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    # 膨胀阈值图像以填充孔洞,然后在阈值图像上查找轮廓
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # 遍历所有轮廓
    for c in cnts:
        # 如果轮廓太小,则忽略它
        if cv2.contourArea(c) < args["min_area"]:
            continue
        # 计算轮廓的边界框,将其绘制在帧上,并更新文本
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"
    # 在帧上绘制文本和时间戳
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
            (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    # 显示帧,并检测用户是否按下按键
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF
    # 如果按下 `q` 键,则退出循环
    if key == ord("q"):
        break
    # 每隔 15 分钟丢弃基准帧,下一次迭代会重新捕获
    if (datetime.datetime.now() - interval) > datetime.timedelta(minutes=15):
        firstFrame = None
# 清理摄像头并关闭所有打开的窗口
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()

答:更新 firstFrame 时也要重置 interval,否则在最初的 15 分钟过后,每次迭代都会重置 firstFrame,帧差分就再也无法进行。——你说得对,非常感谢!