Python: how can I make the code check which section the tracked object is in?

Sorry if the title isn't clear. I'm writing a piece of code in OpenCV Python that tracks where my pet goldfish is and outputs a keystroke based on which quadrant of the frame it is in. I already have the program tracking my fish and outputting its position as coordinates, but how do I convert that into a keystroke (W, A, S, D)? For example, if the fish is in quadrant one, output W; if the fish is in quadrant two, output A.

Here is the code I have so far:

# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "orange"
# fish in the HSV color space
orangeLower = (5, 50, 50)
orangeUpper = (15, 255, 255)
# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "orange", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, orangeLower, orangeUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            pts.appendleft(center)

    # loop over the set of tracked points
    for i in np.arange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # check to see if enough points have been accumulated in
        # the buffer
        if counter >= 10 and i == 10 and pts[i-10] is not None:
            # compute the difference between the x and y
            # coordinates and re-initialize the direction
            # text variables
            dX = pts[i-10][0] - pts[i][0]
            dY = pts[i-10][1] - pts[i][1]
            (dirX, dirY) = ("", "")
            # ensure there is significant movement in the
            # x-direction
            if np.abs(dX) > 20:
                dirX = "East" if np.sign(dX) == 1 else "West"
            # ensure there is significant movement in the
            # y-direction
            if np.abs(dY) > 20:
                dirY = "South" if np.sign(dY) == 1 else "North"
            # handle when both directions are non-empty
            if dirX != "" and dirY != "":
                direction = "{}-{}".format(dirY, dirX)
            # otherwise, only one direction is non-empty
            else:
                direction = dirX if dirX != "" else dirY

        # compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    # show the movement deltas and the direction of movement on
    # the frame
    cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (0, 0, 255), 3)
    cv2.putText(frame, "dx: {}, dy: {}".format(dX, dY),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # draw the quadrant split lines on the frame
    cv2.line(frame, pt1=(300, 0), pt2=(300, 500), color=(0, 0, 0), thickness=3)
    cv2.line(frame, pt1=(0, 225), pt2=(600, 225), color=(0, 0, 0), thickness=3)
    # show the frame to our screen and increment the frame counter
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    counter += 1
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break
# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()
# close all windows
cv2.destroyAllWindows()
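
Roughly, this is the kind of quadrant check I have in mind, using the center computed above and the 300 / 225 split lines the code already draws (just a sketch, and the S/D assignments for the bottom quadrants are only a guess on my part):

# sketch only: map the tracked center to one of the keys, assuming the
# 600-wide resized frame split at x=300 and y=225 (top-right = quadrant one)
def quadrant_key(center, mid_x=300, mid_y=225):
    if center is None:
        return None
    x, y = center
    if y < mid_y:
        return "W" if x >= mid_x else "A"   # quadrant one / quadrant two
    return "D" if x >= mid_x else "S"       # quadrant four / quadrant three

What I still don't know is how to turn that letter into an actual output.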

Thanks for any help.

If I understood you correctly, you want to put a picture on the frame, and which picture depends on the direction of your fish, as if you were steering it with WASD commands, right? To place one image inside another, do the following:

import cv2
import numpy as np

img1 = cv2.imread('Fishframe.jpg')
img2 = cv2.imread('W_button.jpg')

img3 = img1.copy()
# replace the values at coordinates (300, 300) to (399, 399) of img3 with the same region of img2
img3[300:400, 300:400, :] = img2[300:400, 300:400, :]
cv2.imshow('Result1', img3)
cv2.waitKey(0)

So you simply "cut" out a piece and put your own picture into the hole it leaves.
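
For the W/A/S/D case you could keep one small button image per key and paste whichever one matches the fish's quadrant into a fixed corner of the frame. A rough sketch (only W_button.jpg comes from above; the other file names and the small, equal button size are my assumptions):

# sketch: pick the button image for the fish's quadrant and paste it into the
# top-left corner of the frame (assumes four small images of equal size exist)
buttons = {k: cv2.imread(k + "_button.jpg") for k in ("W", "A", "S", "D")}
if center is not None:
    x, y = center
    key_name = ("W" if x >= 300 else "A") if y < 225 else ("D" if x >= 300 else "S")
    button = buttons[key_name]
    if button is not None:
        h, w = button.shape[:2]
        frame[0:h, 0:w] = button  # cut a hole in the frame and drop the button in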


Let me know if it works! Have fun!

What do you mean by "output a keystroke"?

Like, on the frame it would say W, A, S, or D depending on which quadrant the fish is in.
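
If that is all you need, you can skip the extra image and just draw the letter with cv2.putText, keyed off the same quadrant check. Untested sketch, reusing the 300 / 225 split from the code in the question:

# untested sketch: show the quadrant letter on the frame instead of a key press
if center is not None:
    x, y = center
    letter = ("W" if x >= 300 else "A") if y < 225 else ("D" if x >= 300 else "S")
    cv2.putText(frame, letter, (10, 60), cv2.FONT_HERSHEY_SIMPLEX,
        1.0, (0, 255, 0), 2)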