Faster way of color tracking red and green LEDs with OpenCV Python


I am trying to track a robot that carries a red and a green LED using OpenCV in Python. Here is the code I am using:

import argparse
import cv2
import numpy as np
import scipy.io as sio
import time

frame = None

# returns the red and green LED positions
# by tracking color on intensity-thresholded spots
def colorTracking(thresholdedImage, originalImage):
    # LED positions
    redLEDPos = (-1,-1)
    greenLEDPos = (-1,-1)

    #convert color from BGR to HSV
    hsvThresholdedImage = cv2.cvtColor(thresholdedImage, cv2.COLOR_BGR2HSV)

    #define lower and upper limit of hue range for red color
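    # (red wraps around hue 0 on OpenCV's 0-179 hue scale, hence two ranges)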
    lowerRedHueRange = cv2.inRange(hsvThresholdedImage, np.array([0, 100, 100]), np.array([10, 255, 255]))
    upperRedHueRange = cv2.inRange(hsvThresholdedImage, np.array([160, 100, 100]), np.array([179, 255, 255]))

    #Hue Range for Green
    lowerGreenHue = np.array([50, 50, 120])
    upperGreenHue = np.array([70, 255, 255])
    greenMask = cv2.inRange(hsvThresholdedImage, lowerGreenHue, upperGreenHue)

    # merge the lower and upper red masks into one (per-pixel weighted sum)
    redHueThresholdedImage = cv2.addWeighted(lowerRedHueRange, 1.0, upperRedHueRange, 1.0, 0.0)
    # blur the red mask with a Gaussian filter (the fourth positional
    # argument of GaussianBlur is dst, not sigmaY; sigmaY defaults to sigmaX)
    redHueThresholdedImage = cv2.GaussianBlur(redHueThresholdedImage, (9, 9), 2)

    # blur the green mask with a Gaussian filter
    greenHueThresholdedImage = cv2.GaussianBlur(greenMask, (9, 9), 2)

    #find contours in the red hue image formed after weighted adding of lower and upper ranges of red
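    # findContours returns (image, contours, hierarchy) in OpenCV 3.x but
    # (contours, hierarchy) in OpenCV 2.4/4.x; [-2] selects the contour list
    # under either API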
    redContours = cv2.findContours(redHueThresholdedImage.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # only proceed if at least one contour was found
    if len(redContours) > 0:
        # find the largest contour in the mask and compute its centroid
        # from the image moments
        maxContour = max(redContours, key=cv2.contourArea)
        M = cv2.moments(maxContour)
        if M["m00"] > 0:  # guard against a zero-area contour
            redLEDPos = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])

    #find contours in the green hue image formed
    greenContours = cv2.findContours(greenHueThresholdedImage.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # only proceed if at least one contour was found
    if len(greenContours) > 0:
        # find the largest contour in the mask and compute its centroid
        # from the image moments
        maxContour = max(greenContours, key=cv2.contourArea)
        M = cv2.moments(maxContour)
        if M["m00"] > 0:  # guard against a zero-area contour
            greenLEDPos = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])

    return redLEDPos, greenLEDPos

#main loop
def posTrack():
    red_x = []
    red_y = []
    green_x = []
    green_y = []

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video",required=True, help="Path to the video file")
    ap.add_argument("-pf","--posFileName",required=True, help="Path to the mat file")
    args = vars(ap.parse_args())

    # load the video
    cap = cv2.VideoCapture(args["video"])
    if not cap.isOpened():
        print("Fatal Error: Could not open the specified file.")
        exit(-1)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    #cv2.namedWindow("VideoFrame")

    # keep looping until the 'q' key is pressed
    while True:
        #read the video frame by frame
        ret, frame = cap.read()

        if frame is None:
            break
        # median blur: each output pixel is the median of its kernel
        # neighborhood, which removes noise while preserving edges
        frame = cv2.medianBlur(frame, 5)
        # intensity thresholding of the main frame
        # (threshold returns a (retval, image) pair; [-1] keeps the image)
        thresholdedFrame = cv2.threshold(frame, 160, 255, cv2.THRESH_BINARY)[-1]
        #red and green LED position in Main Video
        redLED, greenLED = colorTracking(thresholdedFrame, frame)

        red_x.append(redLED[0])
        red_y.append(redLED[1])
        green_x.append(greenLED[0])
        green_y.append(greenLED[1])

        key = cv2.waitKey(1) & 0xFF
        # if the 'q' key is pressed, break from the loop (waitKey only
        # delivers keypresses while a HighGUI window is open and focused)
        if key == ord("q"):
            break

    # close all open windows
    cv2.destroyAllWindows()

    return args["posFileName"], red_x, red_y, green_x, green_y, width, height

startTime = time.time()
print(startTime)
#retrieves red_x, red_y, green_x, green_y
filename, red_x, red_y, green_x, green_y, width, height = posTrack()
print "\n"
print time.time() - startTime
#save the mat file
sio.savemat(filename, mdict={'width':width, 'height': height, 'red_x':red_x, 'red_y':red_y, 'green_x':green_x, 'green_y':green_y})
Is there a way to speed up this tracking code? Processing a one-hour video takes 30 minutes. Thanks

EDIT Expected output: a mat file with the x and y coordinates of the red and green LEDs
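For reference, here is a minimal sketch of how the saved output could be read back and inspected (the file name positions.mat is hypothetical; the keys match the mdict passed to sio.savemat above):

import scipy.io as sio

data = sio.loadmat("positions.mat")   # hypothetical file name
red_x = data["red_x"].ravel()         # per-frame x coordinates of the red LED
red_y = data["red_y"].ravel()         # per-frame y coordinates of the red LED
print(red_x.shape, red_y.shape)       # one entry per processed frame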

Input: The video I have right now comes from a research experiment (not sure whether I can share it). I will create a dummy video of the same setup and upload it. An image from one of its frames:

You have a lot of code that could be done in a single API call. I would like to test it against sample input; please attach a sample input and the expected output to your question :)
Hi, the input video I have right now cannot be shared. I will create a dummy video and share it soon. Thanks
I am not asking you to share the whole video; a single frame of it would work fine, or you could download an image from somewhere that looks like your use case :)
I added an image. Let me know if it works. Thanks again for your time.
Is this the only footage you get? Why is there so much empty space around the subject? Reducing the image size by cropping would definitely lead to better benchmarks.
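Following the cropping suggestion in the last comment, here is a minimal sketch of restricting the processing to a fixed region of interest (the ROI bounds and the video path are hypothetical and would need to be tuned to the actual footage):

import cv2

# hypothetical region of interest around the arena; pixels outside it are
# never blurred, thresholded, or searched for contours
x0, y0, x1, y1 = 100, 50, 540, 430

cap = cv2.VideoCapture("robot.avi")  # hypothetical path
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    roi = frame[y0:y1, x0:x1]  # NumPy slicing is a view, not a copy
    # ... run medianBlur / threshold / colorTracking on roi instead of frame,
    # then add (x0, y0) back onto any coordinates before saving them
cap.release()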