Python fishing mini-game script with cv2.matchTemplate


I'm trying to make a script for a fishing mini-game. There is a post about a fishing script for Albion Online, but the problem is that in this game the pointer is very thin and there are lots of different textures and colors. Simple grayscale matching works fine on one part of the slider (e.g. over the water texture), sometimes on another part (the tree texture), and not at all on a third (e.g. the sky). If I switch to a lower threshold, it often triggers without a real match.

I also tried edge detection with cv2. No luck.
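
For reference, the simple grayscale matching I mean looks roughly like this (the template file, the capture region and the 0.8 threshold are just placeholders):

import cv2
import numpy as np
import mss

# placeholder: a cropped screenshot of the pointer, saved beforehand
template = cv2.imread("pointer.png", cv2.IMREAD_GRAYSCALE)

with mss.mss() as sct:
    region = {"top": 325, "left": 4423, "width": 136, "height": 662}  # slider area
    frame = np.array(sct.grab(region))               # BGRA screenshot
    gray = cv2.cvtColor(frame, cv2.COLOR_BGRA2GRAY)

    # normalized correlation: values close to 1.0 mean a strong match
    res = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(res)

    if max_val > 0.8:   # lowering this threshold is what causes the false activations
        print("match at", max_loc, "score", max_val)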

The point is to click the button when the fish is inside the small green zone. That zone shows up at a random part of the slider.

Any ideas?

================

UPDATE: I tried color detection as Furas suggested:

# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
from PIL import Image
import mss
import numpy
import pyautogui
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (42, 84, 211)
greenUpper = (69, 130, 255)
blueLower = (88, 76, 255)
blueUpper = (151, 76, 255)
pts = deque(maxlen=args["buffer"])

# grab video from screen(monitor area)
with mss.mss() as sct:
    monitor = {"top": 325, "left": 4423, "width": 136, "height": 662}
    while "Screen capturing":
        #last_time = time.time()
        #vs = numpy.array(sct.grab(monitor))
        #print("fps: {}".format(1 / (time.time() - last_time)))
        vs = sct.grab(monitor)
        # grab the current frame
        #frame = vs
        frame = np.array(vs)
        # resize the frame, blur it, and convert it to the HSV
        # color space
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # construct a mask for the color "green", then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        mask2 = cv2.inRange(hsv, blueLower, blueUpper)
        mask2 = cv2.erode(mask2, None, iterations=2)
        mask2 = cv2.dilate(mask2, None, iterations=2)
        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        center = None
        cnts2 = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts2 = imutils.grab_contours(cnts2)
        center2 = None
        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing rectangle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            (x, y, w, h) = cv2.boundingRect(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
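            # note: cnts2 is not checked here - if the blue fish mask finds nothing,
            # this max() raises "ValueError: max() arg is an empty sequence" (see the traceback further down)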
            c2 = max(cnts2, key=cv2.contourArea)
            (x2, y2, w2, h2) = cv2.boundingRect(c2)
            M2 = cv2.moments(c2)
            center2 = (int(M2["m10"] / M2["m00"]), int(M2["m01"] / M2["m00"]))
            # draw the rectangle and centroid on the frame,
            # then update the list of tracked points
            cv2.rectangle(frame, (int(x), int(y)), (int(x+w), int(y+h)),(0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            cv2.rectangle(frame, (int(x2), int(y2)), (int(x2+w2), int(y2+h2)),(0, 255, 255), 2)
            cv2.circle(frame, center2, 5, (0, 0, 255), -1)
            # update the points queue
            pts.appendleft(center)
            if y-15 < y2 < y+15:
                pyautogui.click(4908, 984)
                time.sleep(2)
                y2 = 0           
        cv2.imshow("frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            cv2.destroyAllWindows()
            break
But what can I use to find the largest contour for both the green field and the fish? Right now it is 2 masks, 2 cnts and 2 max() calls.

With mss sct.grab I don't get very good FPS (about 25 fps on average). Is there another, better way to capture the screen?
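
One option might be a BitBlt grab through pywin32 (win32gui/win32ui, which the script further down already imports). A rough, untested sketch for the same slider region:

import numpy as np
import win32gui
import win32ui
import win32con

def grab_region(left, top, width, height):
    # copy a rectangle of the desktop into a bitmap via BitBlt
    hdesktop = win32gui.GetDesktopWindow()
    desktop_dc = win32gui.GetWindowDC(hdesktop)
    img_dc = win32ui.CreateDCFromHandle(desktop_dc)
    mem_dc = img_dc.CreateCompatibleDC()

    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(img_dc, width, height)
    mem_dc.SelectObject(bmp)
    mem_dc.BitBlt((0, 0), (width, height), img_dc, (left, top), win32con.SRCCOPY)

    info = bmp.GetInfo()
    data = bmp.GetBitmapBits(True)
    frame = np.frombuffer(data, dtype=np.uint8).reshape(info["bmHeight"], info["bmWidth"], 4)

    # free the GDI resources before returning
    win32gui.DeleteObject(bmp.GetHandle())
    mem_dc.DeleteDC()
    img_dc.DeleteDC()
    win32gui.ReleaseDC(hdesktop, desktop_dc)
    return frame  # BGRA, same layout as np.array(sct.grab(...))

# usage: same slider region as the mss version (may need adjusting for multi-monitor coordinates)
frame = grab_region(4423, 325, 136, 662)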


Thanks a lot!

OK, after some workarounds I got it working and tested it, using color detection, on the first "day map". It doesn't always get a "perfect" hit, but at least a "good" one. I guess that's because of the FPS; if I find a way to raise the FPS it might get even better.

# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
from PIL import Image
import mss
import pyautogui
from win32 import win32gui
from pythonwin import win32ui
from win32.lib import win32con
from win32 import win32api
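# note: the four win32 imports above are not used anywhere below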

# define the lower and upper boundaries in the HSV color space, then initialize the
# list of tracked points
# Green "perfect" field
greenLower = (42, 79, 211)
greenUpper = (69, 130, 255)
# Blue fish
blueLower = (88, 76, 255)
blueUpper = (151, 76, 255)
# Purple field
blue2Lower = (114, 139, 218)
blue2Upper = (123, 165, 255)
# Gray field "Status" (after fish is caught)
grayLower = (0, 0, 114)
grayUpper = (0, 0, 132)
# Purple miss
purpleLower = (123, 148, 239)
purpleUpper = (125, 165, 243)
# Define Vars
y2 = 0
a = 0
b = 0
startTime = time.time()
startTime2 = time.time()
# grab video from screen(monitor area)
with mss.mss() as sct:
    #grab picture of slider and fish
    monitor = {"top": 846, "left": 4726, "width": 162, "height": 398}
    #grab picture of gray field "Status" when the fish is caught
    monitor2 = {"top": 1017, "left": 4366, "width": 11, "height": 23}
    #grab picture of purple pixels if fish is lost
    monitor3 = {"top": 1013, "left": 4484, "width": 5, "height": 6}
    while "Screen capturing":
        vs = sct.grab(monitor)
        vs2 = sct.grab(monitor2)
        vs3 = sct.grab(monitor3) 
        # grab the current frame
        frame = np.array(vs)
        frame2 = np.array(vs2)
        frame3 = np.array(vs3)
        # resize the frame, blur it, and convert it to the HSV
        # color space
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        blurred2 = cv2.GaussianBlur(frame2, (11, 11), 0)
        hsv2 = cv2.cvtColor(blurred2, cv2.COLOR_BGR2HSV)
        blurred3 = cv2.GaussianBlur(frame3, (11, 11), 0)
        hsv3 = cv2.cvtColor(blurred3, cv2.COLOR_BGR2HSV)
        # construct a mask for the color, then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        # Mask for green 'perfect' field
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        # Mask for Fish
        mask2 = cv2.inRange(hsv, blueLower, blueUpper)
        mask2 = cv2.erode(mask2, None, iterations=2)
        mask2 = cv2.dilate(mask2, None, iterations=2)
        # Mask for purple Start slider
        mask3 = cv2.inRange(hsv, blue2Lower, blue2Upper)
        mask3 = cv2.erode(mask3, None, iterations=2)
        mask3 = cv2.dilate(mask3, None, iterations=2)
        # Mask for gray field "Status" when the fish is caught
        mask4 = cv2.inRange(hsv2, grayLower, grayUpper)
        mask4 = cv2.erode(mask4, None, iterations=2)
        mask4 = cv2.dilate(mask4, None, iterations=2)
        # Mask for purple fish miss
        mask5 = cv2.inRange(hsv3, purpleLower, purpleUpper)
        mask5 = cv2.erode(mask5, None, iterations=2)
        mask5 = cv2.dilate(mask5, None, iterations=2)
        # find contours in the mask and initialize the current
        # (x, y) center of the rectangle
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts2 = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts2 = imutils.grab_contours(cnts2)
        cnts3 = cv2.findContours(mask3.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts3 = imutils.grab_contours(cnts3)
        cnts4 = cv2.findContours(mask4.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts4 = imutils.grab_contours(cnts4)
        cnts5 = cv2.findContours(mask5.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts5 = imutils.grab_contours(cnts5)
        # only proceed if at least one contour was found
        if len(cnts3) > 0:
            print ("start")
            startTime2 = time.time()
            time.sleep(0.1)
            pyautogui.click(4978, 1239)
            time.sleep(1)
            startTime2 = time.time()
        elif len(cnts) > 0 and len(cnts2) > 0:
            startTime2 = time.time()
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing rectangle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            (x, y) = center
            c2 = max(cnts2, key=cv2.contourArea)
            M2 = cv2.moments(c2)
            center2 = (int(M2["m10"] / M2["m00"]), int(M2["m01"] / M2["m00"]))
            (x2, y2) = center2
            if y-65 < y2 < y+65:
                print ("catch")
                pyautogui.click(4978, 1239)
                time.sleep(1)
                y2 = 0
                startTime2 = time.time()
        elif len(cnts4) > 0:
            time.sleep(1)
            a +=1
            endTime = time.time()
            timer = endTime-startTime
            hour = timer // 3600
            timer %= 3600
            minutes = timer // 60
            timer %= 60
            seconds = timer
            print (a, "fishes and", b, "misses in %d:%d:%d" % (hour, minutes, seconds))
            print ("start over")
            time.sleep(1)
            pyautogui.click(4741, 913)
            pyautogui.click(4741, 913)
            time.sleep(1)
            pyautogui.click(4978, 1239)
            pyautogui.click(4978, 1239)
            time.sleep(1)
            startTime2 = time.time()
        elif len(cnts5) > 0:
            b +=1
            print ("Miss")
            time.sleep(2)
            pyautogui.click(4978, 1239)
            startTime2 = time.time()
        else:
            endTime2 = time.time()
            if (endTime2 - startTime2 > 40):
                print("Longer than 40 seconds")
                startTime2 = time.time()
                print ("restart")
                pyautogui.click(4741, 913)
                time.sleep(1)
                pyautogui.click(4978, 1239)
                time.sleep(1) 
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            cv2.destroyAllWindows()
            break
With the first version I sometimes got this error when the blue fish mask came up empty:

Traceback (most recent call last):
  File "C:\Users\Game\Desktop\Py\Fish.py", line 74, in <module>
    c2 = max(cnts2, key=cv2.contourArea)
ValueError: max() arg is an empty sequence

One suggestion was to combine the two masks into a single mask:

mask = cv2.bitwise_or(mask1, mask2)
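
For illustration, a minimal sketch of how that combined mask could replace the two separate findContours passes, assuming the green field is the largest blob in the combined mask and the fish the second largest (the 65 px tolerance and the click position are the ones used in the script above):

# one mask and one findContours pass for both the green field and the fish
combined = cv2.bitwise_or(mask, mask2)
cnts_all = cv2.findContours(combined.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts_all = imutils.grab_contours(cnts_all)

if len(cnts_all) >= 2:
    # assumption: the green field is the largest contour, the fish the second largest
    field, fish = sorted(cnts_all, key=cv2.contourArea, reverse=True)[:2]
    Mf = cv2.moments(field)
    Mh = cv2.moments(fish)
    y_field = int(Mf["m01"] / Mf["m00"])
    y_fish = int(Mh["m01"] / Mh["m00"])
    if y_field - 65 < y_fish < y_field + 65:   # same tolerance as in the script above
        pyautogui.click(4978, 1239)            # same click position as in the script above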