Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/python/346.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181

Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/opencv/3.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python OpenCV断言失败,值为负值_Python_Opencv_Facial Identification - Fatal编程技术网

Python OpenCV断言失败,值为负值

Python OpenCV断言失败,值为负值,python,opencv,facial-identification,Python,Opencv,Facial Identification,我正在用OpenCV和Keras制作一个人脸活力检测程序。 我被这个错误所困扰: OpenCV断言失败,值为负值 我尝试了所有建议的答案,但都没有解决我的问题 我已经尝试了StackOverflow和Github问题上的所有解决方案,但在我的案例中没有一个成功 video_capture = cv2.VideoCapture(0) model = load_model() print("[LOG] COLLECTING images.....") images = [

我正在用OpenCV和Keras制作一个人脸活力检测程序。 我被这个错误所困扰:

OpenCV断言失败,值为负值

我尝试了所有建议的答案,但都没有解决我的问题

我已经尝试了StackOverflow和Github问题上的所有解决方案,但在我的案例中没有一个成功

video_capture = cv2.VideoCapture(0)

    model = load_model()

    print("[LOG] COLLECTING images.....")
    images = []
    for direc, _, files in tqdm(os.walk(dataset)):
        for file in files:
            if file.endswith("jpg"):
                images.append(os.path.join(direc, file))

    return model, face_detector, open_eyes_detector, left_eye__detector, right_eye_detector, video_capture, images


def process_and_encode(images):
    """Compute a face encoding for every image path in *images*.

    The person's label is taken from the parent directory name of each
    image path. Images that cannot be read, or in which no face encoding
    is found, are skipped.

    Returns a dict: {"encodings": [128-d encodings], "names": [labels]}.
    """
    known_encodings = []
    known_names = []
    print("[LOG] Encoding faces....")

    for image_path in tqdm(images):

        image = cv2.imread(image_path)
        # BUG FIX: imread returns None for unreadable/corrupt files; the
        # original then crashed inside cvtColor with an OpenCV assertion.
        if image is None:
            continue

        # face_recognition expects RGB; OpenCV loads BGR.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(image, model='hog')
        encoding = face_recognition.face_encodings(image, boxes)
        if len(encoding) > 0:
            # Parent directory name is the person's label.
            name = image_path.split(os.path.sep)[-2]
            known_encodings.append(encoding[0])
            known_names.append(name)
    return {"encodings": known_encodings, "names": known_names}

错误消息:-

参数“src”应为cv::UMat 回溯最近一次呼叫上次: 文件C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py,第190行,in cv2.imshowFace活度检测器,帧 cv2.error:OpenCV4.1.0 C:\projects\opencv python\opencv\modules\highgui\src\window.cpp:352:错误:-215:函数“cv::imshow”中的断言失败大小.宽度>0和大小.高度>0

[警告:0]正在终止异步回调


这可能是因为您尝试使用imshow,但之前没有imwrite。这不完全是一个解决方案,而是一个有效的例子。看一看:

import cv2  # pip install opencv-python
import datetime
from cv2.data import haarcascades as hc
import requests

# Webcam capture shared by every mode below (device 0 = default camera).
cap = cv2.VideoCapture(0)

# Pre-trained Haar cascades bundled with OpenCV (hc is its data directory).
# Only the face and eye cascades are used by the functions below; the rest
# are loaded but unused in this example.
faceCascade = cv2.CascadeClassifier("%s/haarcascade_frontalface_default.xml" % hc)
eye_cascade = cv2.CascadeClassifier('%s/haarcascade_eye.xml' % hc)
profile_cascade = cv2.CascadeClassifier('%s/haarcascade_profileface.xml' % hc)
fullbody_cascade = cv2.CascadeClassifier('%s/haarcascade_fullbody.xml' % hc)
smile_cascade = cv2.CascadeClassifier('%s/haarcascade_smile.xml' % hc)
eyesglasses_cascade = cv2.CascadeClassifier('%s/haarcascade_eye_tree_eyeglasses.xml' % hc)
mouth_cascade = cv2.CascadeClassifier('%s/haarcascade_mcs_mouth.xml' % hc)

# Timestamped base name for output files (extension appended at use site).
filename = 'output/'+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')


def recognizer():
    """Live preview loop: annotate profiles/faces/eyes on webcam frames and
    display them until 'q' is pressed.

    The frame is written to disk and read back before imshow — this
    round-trip was the original workaround for the imshow size assertion
    and is kept as-is. BUG FIX: check `ret` before processing; a failed
    read yields frame=None, which is the real cause of the
    "size.width>0 && size.height>0" assertion failure downstream.
    """
    while True:
        ret, frame = cap.read()
        if not ret:  # camera produced no frame; don't pass None to cvtColor
            break

        profile_count = recognize_profile(frame)
        face_count, eye_count = recognize_face(frame, True)

        cv2.imwrite('%s.png' % filename, frame)
        image = cv2.imread('%s.png' % filename)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def recognize_profile(frame):
    """Detect profile (side-view) faces in *frame*, drawing blue boxes in place.

    Returns the number of profiles found; 0 if *frame* is None (a failed
    cap.read()), instead of crashing in cvtColor as the original did.
    """
    if frame is None:
        return 0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    profiles = profile_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    for (x, y, w, h) in profiles:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return len(profiles)


def recognize_face(frame, recognize_eyes=None):
    """Detect faces (and optionally eyes) in *frame*, drawing boxes in place.

    Returns (face_count, eye_count). BUG FIX: the original reassigned
    `eyes` on every face, so the returned eye count reflected only the
    LAST detected face; eyes are now accumulated across all faces.
    Returns (0, 0) for a None frame instead of crashing in cvtColor.
    """
    if frame is None:
        return 0, 0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    eye_count = 0
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if recognize_eyes:
            # Search for eyes only inside the detected face region.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            eye_count += len(eyes)
            # Draw a circle around each detected eye.
            for (ex, ey, ew, eh) in eyes:
                eye_center = (ex + ew // 2, ey + eh // 2)
                radius = int(round((ew + eh) * 0.25))
                cv2.circle(roi_color, eye_center, radius, (0, 0, 255), 4)
    return len(faces), eye_count


def snapshot():
    """Capture a single frame, annotate it, save it as a PNG, then release
    the camera.

    The original wrapped this in a `while True` loop with an unconditional
    `break` (one iteration); the loop is removed. BUG FIX: `ret` is now
    checked so a failed read no longer passes None to the detectors.
    """
    ret, frame = cap.read()
    if ret:
        recognize_profile(frame)
        recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)

    cap.release()
    cv2.destroyAllWindows()


def live_video_recognizer():
    """Record annotated webcam video to <filename>.avi until 'q' is pressed.

    BUG FIX: the original ran the detectors on the frame BEFORE checking
    `ret`, crashing on None when capture fails; detection now runs only on
    valid frames.
    """
    frame_width = int(cap.get(3))    # CAP_PROP_FRAME_WIDTH
    frame_height = int(cap.get(4))   # CAP_PROP_FRAME_HEIGHT
    out = cv2.VideoWriter('%s.avi' % filename,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          10, (frame_width, frame_height))
    while True:
        ret, frame = cap.read()

        if ret is True:
            recognize_profile(frame)
            recognize_face(frame, True)

            out.write(frame)

            cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        else:
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()


# Entry point: run the live preview loop. Swap in snapshot() or
# live_video_recognizer() to exercise the other modes.
recognizer()
# snapshot()
# live_video_recognizer()

未创建帧0x0。如果detect_和_display仅包含frame=video_capture.read,会发生什么情况?如果它能工作,你会逐步在detect_和_display中添加一些进程,直到它再次崩溃。不,它不工作。嗨!现在我得到了这个错误:-ValueError:设置一个数组元素和一个序列。你能帮帮我吗?你从哪里得到这个错误的?把你的堆栈跟踪发给我这里是我的堆栈跟踪:-请帮帮我。回溯最近一次调用:文件C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py,第192行,在右眼检测器中,数据,眼睛检测到的文件C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py,第81行,在检测和显示框中=cv2.resizenp.float32frame,0,0,fx=0.6,fy=0.6 ValueError:设置带有序列的数组元素。嘿!你看过我的堆栈跟踪了吗?
import cv2  # pip install opencv-python
import datetime
from cv2.data import haarcascades as hc
import requests

# Webcam capture shared by every mode below (device 0 = default camera).
cap = cv2.VideoCapture(0)

# Pre-trained Haar cascades bundled with OpenCV (hc is its data directory).
# Only the face and eye cascades are used by the functions below; the rest
# are loaded but unused in this example.
faceCascade = cv2.CascadeClassifier("%s/haarcascade_frontalface_default.xml" % hc)
eye_cascade = cv2.CascadeClassifier('%s/haarcascade_eye.xml' % hc)
profile_cascade = cv2.CascadeClassifier('%s/haarcascade_profileface.xml' % hc)
fullbody_cascade = cv2.CascadeClassifier('%s/haarcascade_fullbody.xml' % hc)
smile_cascade = cv2.CascadeClassifier('%s/haarcascade_smile.xml' % hc)
eyesglasses_cascade = cv2.CascadeClassifier('%s/haarcascade_eye_tree_eyeglasses.xml' % hc)
mouth_cascade = cv2.CascadeClassifier('%s/haarcascade_mcs_mouth.xml' % hc)

# Timestamped base name for output files (extension appended at use site).
filename = 'output/'+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')


def recognizer():
    """Live preview loop: annotate profiles/faces/eyes on webcam frames and
    display them until 'q' is pressed.

    The frame is written to disk and read back before imshow — this
    round-trip was the original workaround for the imshow size assertion
    and is kept as-is. BUG FIX: check `ret` before processing; a failed
    read yields frame=None, which is the real cause of the
    "size.width>0 && size.height>0" assertion failure downstream.
    """
    while True:
        ret, frame = cap.read()
        if not ret:  # camera produced no frame; don't pass None to cvtColor
            break

        profile_count = recognize_profile(frame)
        face_count, eye_count = recognize_face(frame, True)

        cv2.imwrite('%s.png' % filename, frame)
        image = cv2.imread('%s.png' % filename)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def recognize_profile(frame):
    """Detect profile (side-view) faces in *frame*, drawing blue boxes in place.

    Returns the number of profiles found; 0 if *frame* is None (a failed
    cap.read()), instead of crashing in cvtColor as the original did.
    """
    if frame is None:
        return 0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    profiles = profile_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    for (x, y, w, h) in profiles:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return len(profiles)


def recognize_face(frame, recognize_eyes=None):
    """Detect faces (and optionally eyes) in *frame*, drawing boxes in place.

    Returns (face_count, eye_count). BUG FIX: the original reassigned
    `eyes` on every face, so the returned eye count reflected only the
    LAST detected face; eyes are now accumulated across all faces.
    Returns (0, 0) for a None frame instead of crashing in cvtColor.
    """
    if frame is None:
        return 0, 0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    eye_count = 0
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if recognize_eyes:
            # Search for eyes only inside the detected face region.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            eye_count += len(eyes)
            # Draw a circle around each detected eye.
            for (ex, ey, ew, eh) in eyes:
                eye_center = (ex + ew // 2, ey + eh // 2)
                radius = int(round((ew + eh) * 0.25))
                cv2.circle(roi_color, eye_center, radius, (0, 0, 255), 4)
    return len(faces), eye_count


def snapshot():
    """Capture a single frame, annotate it, save it as a PNG, then release
    the camera.

    The original wrapped this in a `while True` loop with an unconditional
    `break` (one iteration); the loop is removed. BUG FIX: `ret` is now
    checked so a failed read no longer passes None to the detectors.
    """
    ret, frame = cap.read()
    if ret:
        recognize_profile(frame)
        recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)

    cap.release()
    cv2.destroyAllWindows()


def live_video_recognizer():
    """Record annotated webcam video to <filename>.avi until 'q' is pressed.

    BUG FIX: the original ran the detectors on the frame BEFORE checking
    `ret`, crashing on None when capture fails; detection now runs only on
    valid frames.
    """
    frame_width = int(cap.get(3))    # CAP_PROP_FRAME_WIDTH
    frame_height = int(cap.get(4))   # CAP_PROP_FRAME_HEIGHT
    out = cv2.VideoWriter('%s.avi' % filename,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          10, (frame_width, frame_height))
    while True:
        ret, frame = cap.read()

        if ret is True:
            recognize_profile(frame)
            recognize_face(frame, True)

            out.write(frame)

            cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        else:
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()


# Entry point: run the live preview loop. Swap in snapshot() or
# live_video_recognizer() to exercise the other modes.
recognizer()
# snapshot()
# live_video_recognizer()