Warning: file_get_contents(/data/phpspider/zhask/data//catemap/7/python-2.7/5.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python 3.x 如何从保存在文件中的多个帧装载视频?_Python 3.x_Python 2.7_Opencv3.0 - Fatal编程技术网

Python 3.x 如何从保存在文件中的多个帧装载视频?

Python 3.x 如何从保存在文件中的多个帧装载视频?,python-3.x,python-2.7,opencv3.0,Python 3.x,Python 2.7,Opencv3.0,我使用一个利用卡尔曼滤波器稳定视频的代码,但我无法录制要保存的视频。我想把所有的帧保存在一个文件中,以后再制作一个视频 如果有人有更好的想法,或者如何修改代码以使用cv2.videoWriter。请提供一个。我想从张贴的代码的稳定帧中下载一个视频,我使用了cv2.videoWriter,但它不起作用。我正在考虑使用ffmpeg。请包含一些您尝试过的代码,以便人们可以查看它并提供评论。我尝试使用cv2.videoWriter,如下代码所示,但它创建了一个没有内容的视频,我正在考虑按如下方式保存每个

我使用一个利用卡尔曼滤波器稳定视频的代码,但我无法录制要保存的视频。我想把所有的帧保存在一个文件中,以后再制作一个视频


如果有人有更好的想法,或者如何修改代码以使用cv2.videoWriter。

请提供一个。我想从张贴的代码的稳定帧中生成一个视频，我使用了cv2.VideoWriter，但它不起作用，我正在考虑使用ffmpeg。请包含一些您尝试过的代码，以便人们可以查看它并提供评论。我尝试使用cv2.VideoWriter（如下代码所示），但它创建了一个没有内容的视频。我正在考虑按如下方式保存每个帧：`cv2.imwrite("../data/video/img%d.png" % cont, frame)`，然后使用ffmpeg合成：`ffmpeg -start_number 0 -i img_%d.png -vcodec mpeg4 test.avi`。你的意思是这是问题的一部分吗？我想从发布的代码的稳定帧中生成一个视频，我使用了cv2.VideoWriter，但它不起作用，我正在考虑使用ffmpeg。
# Standard library first, then third-party imports.
import time

import cv2
import numpy as np
from matplotlib import pyplot as plt

# Record the script start time; `time` was used below but never imported,
# which raised NameError on the first line of the script.
ini = time.time()

class FrameInfo:
    """Container for one video frame and the tracking data derived from it."""

    def __init__(self):
        self.img = None        # colour frame (BGR), downscaled working copy
        self.img_gray = None   # grayscale version used for feature tracking
        self.features = []     # corner points detected in img_gray
        self.number = 0        # frame index within the source clip
        self.trajectory = Trajectory()

    @property
    def width(self):
        """Pixel width of the grayscale frame."""
        shape = self.img_gray.shape
        return shape[1]

    @property
    def height(self):
        """Pixel height of the grayscale frame."""
        shape = self.img_gray.shape
        return shape[0]

    @property
    def width_height(self):
        """(width, height) tuple — the numpy shape reversed, as OpenCV expects."""
        shape = self.img_gray.shape
        return shape[::-1]

videoInPath = "../data/MVI_5408.avi"
camera = cv2.VideoCapture(videoInPath)

# Output path is derived from the input path; probe the stream's properties.
videoOutPath = videoInPath +'sab.avi'
N_FRAMES = int(camera.get(cv2.CAP_PROP_FRAME_COUNT))
FPS = camera.get(cv2.CAP_PROP_FPS)
VID_WIDTH = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))
VID_HEIGHT = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
# print() as a function so the script runs under both Python 2 and 3;
# the original Python-2-only `print "..."` statements are a syntax error on 3.x.
print("N_FRAMES: " + str(N_FRAMES))
print("FPS: " + str(FPS))

# Sliding pair of frames used for pairwise optical flow.
frame = None
prev_frame = None

# Accumulated raw camera trajectory plus per-frame histories
# (original vs. Kalman-stabilised) collected for later analysis/plotting.
trajectory = Trajectory(0, 0, 0)
org_trajectories = []
stb_trajectories = []

# NOTE(review): with integer operands this is floor division under Python 2
# (40 / 20 == 2) but true division under Python 3 (2.0) — confirm which is intended.
crop = 40
crop_rate = crop / 20
# Kalman filter over (x, y, angle). `filter` shadows the builtin of the same
# name; kept as-is because later code references it by this name.
filter = KalmanFilter(Trajectory(4e-2, 4e-2, 4e-2), Trajectory(crop_rate, 
crop_rate, crop_rate), Trajectory(1, 1, 1))
#surf = cv2.SURF(4000)
# Last successful affine transform (fallback when estimation fails) and
# the running frame counter.
prev_trans = None
frame_number = 0




# Fixed working resolution every frame is downscaled to (half of 1336x768).
frame_width = int(1336.0 / 2)
frame_height = int(768.0 / 2)



def resize(img):
    """Scale *img* to the fixed working resolution using Lanczos resampling."""
    target_size = (frame_width, frame_height)
    return cv2.resize(img, target_size, interpolation=cv2.INTER_LANCZOS4)


# Parameters for the pyramidal Lucas-Kanade optical-flow tracker.
lk_params = dict(
    winSize=(19, 19),
    maxLevel=2,
    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
)

# Parameters for Shi-Tomasi corner detection (goodFeaturesToTrack).
feature_params = dict(
    maxCorners=100,
    qualityLevel=0.01,
    minDistance=8,
    blockSize=19,
)

# Fraction of the frame kept after the stabilisation crop, and the per-axis
# pixel margins (plus an angle limit) derived from it.
crop_rate = 0.9
limits = [
    int(frame_width * (1 - crop_rate)),
    int(frame_height * (1 - crop_rate)),
    0.05,
]

# Writer for the stabilised clip; its frame size already subtracts the crop
# margin on both sides of each axis.
output_video = cv2.VideoWriter(
    videoOutPath,
    cv2.VideoWriter_fourcc(*'XVID'),
    FPS,
    (frame_width - 2 * limits[0], frame_height - 2 * limits[1]),
)


# Accumulated wall-clock time (seconds) spent in each pipeline stage.
feature_cont = flow_cont = ransac_cont = kalman_cont = 0
compensate_count = 0
percent = 0

# Per-frame statistics: detected corners and RANSAC inlier/outlier counts.
features_quant = []
inliers_quant = []
ouliers_quant = []

for k in range(N_FRAMES - 1):
    # Grab the next frame; a None image means the stream ended early.
    ret, frame_img = camera.read()

    if frame_img is None:
        break

    # Remember the previous frame so optical flow can be computed pairwise.
    if frame is not None:
        prev_frame = frame

    frame_number += 1

    # Skip the first 219 frames (hard-coded region of interest in this clip).
    if frame_number < 220:
        continue

    # Build the working frame: downscale to the fixed working resolution
    # (exactly frame_width x frame_height) and convert to grayscale.
    frame = FrameInfo()
    frame.number = frame_number
    frame.img = frame_img
    frame.img = cv2.resize(frame_img, (0, 0),
                           fx=(1336.0 / frame_img.shape[1]) / 2.0,
                           fy=(768.0 / frame_img.shape[0]) / 2.0,
                           interpolation=cv2.INTER_LANCZOS4)
    frame.img_gray = cv2.cvtColor(frame.img, cv2.COLOR_BGR2GRAY)

    # --- Feature detection (timed) ---
    feature_time_ini = time.time()
    frame.features = cv2.goodFeaturesToTrack(frame.img_gray, **feature_params)
    if frame.features is None:
        # goodFeaturesToTrack returns None (not an empty array) when no
        # corner passes the quality threshold; len(None) would raise, so
        # drop this frame and keep the previous one as the flow reference.
        frame = prev_frame
        continue
    features_quant.append(len(frame.features))

    if prev_frame is None:
        continue

    feature_cont += time.time() - feature_time_ini

    # --- Optical flow, forward and backward for validation (timed) ---
    flow_time_ini = time.time()
    new_features, _, _ = cv2.calcOpticalFlowPyrLK(
        prev_frame.img, frame.img, prev_frame.features, None, **lk_params)
    new_features_for_validation, _, _ = cv2.calcOpticalFlowPyrLK(
        frame.img, prev_frame.img, new_features, None, **lk_params)
    flow_cont += time.time() - flow_time_ini

    # Forward-backward consistency check: keep only points that track back
    # to within 1 px of where they started.
    d = abs(prev_frame.features - new_features_for_validation).reshape(-1, 2).max(-1)
    good = d < 1

    # Select good_features points
    good_new = np.array([x for x, s in zip(new_features, good) if s], dtype=np.float32)
    good_old = np.array([x for x, s in zip(prev_frame.features, good) if s], dtype=np.float32)

    # --- Robust inter-frame motion estimation via RANSAC (timed) ---
    ransac_time_ini = time.time()
    trans, inliers_indices, outliers_indices = RansacMotionEstimator(40, 1.0).estimate(good_old, good_new)
    ransac_cont += time.time() - ransac_time_ini

    inliers_quant.append(len(inliers_indices))
    ouliers_quant.append(len(outliers_indices))

    if trans is None and prev_trans is None:
        print("wuf? trans is None and prev_trans is none too")
        continue

    if trans is None:
        # Fall back to the last good transform when estimation fails.
        trans = prev_trans
        print("wut? trans is None")

    # Accumulate the raw (unsmoothed) camera trajectory.
    delta = Trajectory(get_x(trans), get_y(trans), get_rad_angle(trans))
    trajectory += delta

    # --- Kalman smoothing of the trajectory (timed) ---
    kalman_time_ini = time.time()
    filter.put(trajectory)
    diff = filter.get() - trajectory
    new_delta = delta + diff
    kalman_cont += time.time() - kalman_time_ini

    org_trajectories.append(trajectory)
    stb_trajectories.append(filter.get())

    # Rewrite the affine matrix in-place from the smoothed motion.
    fill_mat(trans, new_delta.x, new_delta.y, new_delta.angle)

    # --- Motion compensation (timed) ---
    compensate_time_ini = time.time()
    out = cv2.warpAffine(frame_img, trans, frame.width_height,
                         flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    compensate_count += time.time() - compensate_time_ini

    prev_trans = trans.copy()

    # Crop the stabilisation border so the frame size matches the size
    # output_video was opened with. cv2.VideoWriter silently discards frames
    # whose dimensions differ from the writer's, which is why the original
    # code produced a video file with no content.
    out = out[limits[1]:frame_height - limits[1],
              limits[0]:frame_width - limits[0]]
    output_video.write(out)

# Release the capture and writer handles so the output file is finalised
# on disk, then drop any OpenCV GUI windows.
camera.release()
output_video.release()
cv2.destroyAllWindows()