Python: record video while motion is detected, but release the VideoWriter when motion stops

Tags: python, python-3.x, opencv

The goal of this script is to start recording to a video file when motion is detected. When motion is no longer detected, the writer should be released, and the script should create a new video the next time that motion is detected... and so on.

With the script below, I can start writing to a video file once motion is detected, but I have to press the q key to release the writer and make the video playable. If I don't press q, recording stops, but the next time motion occurs the frames get appended to the existing video. I have tried writer.release() in several places, but without success.
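
In essence, the lifecycle I am trying to implement is the following. This is only a sketch: detect_motion(), the output path, and the frame rate are placeholders, not part of the script below.

# Sketch of the intended writer lifecycle. detect_motion(), "clip.mp4"
# and the fps value are placeholders, not the real script.
import cv2

fourcc = 0x00000020
writer = None

while True:
    motion_detected, frame = detect_motion()   # placeholder for the real detection
    if motion_detected:
        if writer is None:                     # first motion frame: open a new clip
            h, w = frame.shape[:2]
            writer = cv2.VideoWriter("clip.mp4", fourcc, 30.0, (w, h), True)
        writer.write(frame)
    elif writer is not None:                   # motion just ended: finalize the clip
        writer.release()
        writer = None                          # next motion starts a fresh file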

# import the necessary packages
from pyimagesearch.tempimage import TempImage
import argparse
import warnings
import datetime
import imutils
import json
import numpy as np
import time
import cv2

print("[INFO] Kicking off script - " +
      datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S"))

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to the JSON configuration file")
args = vars(ap.parse_args())

# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
client = None

# initialize the camera and grab a reference to the raw camera capture
# if the video argument is None, then we are reading from webcam
if not conf["use_ip_cam"]:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

# otherwise, we are reading from a video input
else:
    camera = cv2.VideoCapture(conf["ip_cam_addr"])

# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
fourcc = 0x00000020  # a little hacky, but works for now
writer = None
(h, w) = (None, None)
zeros = None
output = None

# capture frames from the camera
# for f in camera.capture_continuous(rawCapture, format="bgr",
# use_video_port=True):
while True:
    # grab the raw NumPy array representing the image and initialize
    # the timestamp and occupied/unoccupied text
    (grabbed, frame) = camera.read()

    # frame = f.array
    timestamp = datetime.datetime.now()
    motion_detected = False

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        # frame.truncate(0)
        continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)

    (_, cnts, _) = cv2.findContours(thresh.copy(),
                                    cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w1, h1) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w1, y + h1), (0, 255, 0), 2)
        motion_detected = True

    fps = camera.get(cv2.CAP_PROP_FPS)
    ts = timestamp.strftime("%Y-%m-%d_%H_%M_%S")
    time_and_fps = ts + " - fps: " + str(fps)

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Motion Detected: {}".format(motion_detected), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, time_and_fps, (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.35, (0, 0, 255), 1)

    # check to see if the room is occupied
    if motion_detected:
        motionCounter += 1
        # check to see if the number of frames with consistent motion is
        # high enough
        if motionCounter >= conf["min_motion_frames"]:
            # check if the writer is None
            if writer is None:
                print("hitting writer is none")
                # store the image dimensions, initialize the video
                # writer, and construct the zeros array
                (h2, w2) = frame.shape[:2]
                writer = cv2.VideoWriter("/Users/user/Library/Mobile Documents/com~apple~CloudDocs/testMotionDetection/" +
                                         datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S") + ".mp4",
                                         fourcc, fps,
                                         (w2, h2), True)
                zeros = np.zeros((h2, w2), dtype="uint8")

            # construct the final output frame, storing the
            # original frame
            output = np.zeros((h2, w2, 3), dtype="uint8")
            output[0:h2, 0:w2] = frame

            # write the output frame to file
            writer.write(output)

    # otherwise, there is no motion
    else:
        writer.release()
        # Traceback (most recent call last):
        #   File "pi_surveillance.py", line 178, in <module>
        #     writer.release()
        # AttributeError: 'NoneType' object has no attribute 'release'
        motionCounter = 0

    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        # display the security feed
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break

# cleanup the camera and close any open windows
print("[INFO] cleaning up...")
camera.release()
cv2.destroyAllWindows()
# writer.release() - only releases writer when q is pressed
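What eventually worked for me is the version below. Instead of releasing the writer on the first motion-free frame, it keeps writing for a configurable number of frames (nonMotionTimer) after motion stops; once that timer runs out it releases the writer and resets it to None, so the next motion event opens a brand-new file, and it deletes the file if no motion was ever recorded into it:
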
#!/usr/local/bin/python3

import argparse
import warnings
import datetime
import imutils
import json
import numpy as np
import os
import time
import cv2

print("[INFO] Kicking off script - " +
      datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S"))

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to the JSON configuration file")
args = vars(ap.parse_args())

# filter warnings, load the configuration
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))

# initialize the camera and grab a reference to the raw camera capture
# if the video argument is None, then we are reading from webcam
if not conf["use_ip_cam"]:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

# otherwise, we are reading from a video input
else:
    camera = cv2.VideoCapture(conf["ip_cam_addr"])

# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motion_counter = 0
non_motion_timer = conf["nonMotionTimer"]
fourcc = 0x00000020  # a little hacky, but works for now
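# (cv2.VideoWriter_fourcc(*"mp4v") is the documented way to build a FourCC;
#  0x20 is an undocumented value that appears to select an MP4 codec here)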
writer = None
(h, w) = (None, None)
zeros = None
output = None
made_recording = False

# capture frames from the camera
while True:
    # grab the raw NumPy array representing the image and initialize
    # the timestamp and occupied/unoccupied text
    (grabbed, frame) = camera.read()

    timestamp = datetime.datetime.now()
    motion_detected = False

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        print("[INFO] Frame couldn't be grabbed. Breaking - " +
              datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S"))
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=conf["resizeWidth"])
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        # frame.truncate(0)
        continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    (_, cnts, _) = cv2.findContours(thresh.copy(),
                                    cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w1, h1) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w1, y + h1), (0, 255, 0), 2)
        motion_detected = True

    fps = int(round(camera.get(cv2.CAP_PROP_FPS)))
    record_fps = 10
    ts = timestamp.strftime("%Y-%m-%d_%H_%M_%S")
    time_and_fps = ts + " - fps: " + str(fps)

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Motion Detected: {}".format(motion_detected), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, time_and_fps, (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.35, (0, 0, 255), 1)

    # Check if writer is None TODO: make path configurable
    if writer is None:
        filename = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
        file_path = (conf["userDir"] + "/Library/Mobile Documents/"
                     "com~apple~CloudDocs/testMotionDetection/testing/"
                     "{filename}.mp4")
        file_path = file_path.format(filename=filename)

        (h2, w2) = frame.shape[:2]
        writer = cv2.VideoWriter(file_path, fourcc, record_fps, (w2, h2), True)
        zeros = np.zeros((h2, w2), dtype="uint8")

    def record_video():
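        # defined inside the loop so it closes over the current frame,
        # writer, h2 and w2 from the surrounding scope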
        # construct the final output frame, storing the original frame
        output = np.zeros((h2, w2, 3), dtype="uint8")
        output[0:h2, 0:w2] = frame

        # write the output frame to file
        writer.write(output)
        # print("[DEBUG] Recording....")

    if motion_detected:

        # increment the motion counter
        motion_counter += 1

        # check to see if the number of frames with motion is high enough
        if motion_counter >= conf["min_motion_frames"]:
            if conf["create_image"]:
                # create image TODO: make path configurable
                image_path = (conf["userDir"] + "/Library/Mobile Documents/"
                              "com~apple~CloudDocs/testMotionDetection/testing"
                              "/{filename}.jpg").format(filename=filename)
                cv2.imwrite(image_path, frame)

            record_video()

            made_recording = True
            non_motion_timer = conf["nonMotionTimer"]

    # If there is no motion, continue recording until timer reaches 0
    # Else clean everything up
    else:  # TODO: implement a max recording time
        # print("[DEBUG] no motion")
        if made_recording is True and non_motion_timer > 0:
            non_motion_timer -= 1
            # print("[DEBUG] first else and timer: " + str(non_motion_timer))
            record_video()
        else:
            # print("[DEBUG] hit else")
            motion_counter = 0
            if writer is not None:
                # print("[DEBUG] hit if 1")
                writer.release()
                writer = None
            if made_recording is False:
                # print("[DEBUG] hit if 2")
                os.remove(file_path)
            made_recording = False
            non_motion_timer = conf["nonMotionTimer"]

    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break

# cleanup the camera and close any open windows
print("[INFO] cleaning up...")
camera.release()
cv2.destroyAllWindows()
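
For reference, this is the JSON configuration file passed via --conf. Paths and values here are examples; nonMotionTimer is counted in processed frames, so at roughly 10 frames per second it keeps about 3.6 seconds of footage after motion stops.
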
{
  "show_video": true,
  "use_ip_cam": false,
  "ip_cam_addr": "rtsp://<ip>/live0.264",
  "create_image": true,
  "min_upload_seconds": 5,
  "min_motion_frames": 12,
  "camera_warmup_time": 2.5,
  "delta_thresh": 5,
  "resolution": [640, 480],
  "fps": 16,
  "min_area": 500,
  "userDir": "/Path/to/user",
  "resizeWidth": 500,
  "nonMotionTimer": 36
}
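
Assuming the script is saved as pi_surveillance.py (the filename shown in the traceback above) next to this configuration file, it can be run with:

python3 pi_surveillance.py --conf conf.json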