Python imageai error after two epochs: ValueError: Ensure you specified correct input image, input type, output type and/or output image path

Tags: python, tensorflow, cnn, kaggle, imageai

This is the error I am getting and I don't know why it happens, because the first epoch ran smoothly. I have included the notebook file containing the whole code. I am doing this on a Kaggle notebook with internet and GPU turned on.

I am using TensorFlow 1 and this is an action recognition task. I built the model from scratch, so you may want to look at the details. My entire code is below; only one function is missing, but it is not important, and I have commented next to where it is used so you know what it does.

Code:

!cp -r ../input/imageai/imageai/imageai/ imageai

!python3 -c 'import tensorflow as tf; print(tf.__version__)'  # for Python 3

!python -c 'import keras; print(keras.__version__)'
1.12.0
Using TensorFlow backend.
2.2.4

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available under the input directory

import os

from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
from keras.utils.data_utils import GeneratorEnqueuer
import matplotlib.pyplot as plt
import pandas as pd 
import numpy as np 
import math, os
from imageai.Detection import ObjectDetection
import warnings
from keras.callbacks import ModelCheckpoint
from imageai.Detection import ObjectDetection
import os
import cv2
import numpy as np
import shutil

from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, UpSampling2D
from keras import backend as K

import random
import glob
import subprocess
import os
from PIL import Image
import numpy as np
from matplotlib.pyplot import imshow, figure

from keras.layers import Lambda, Reshape, Permute, Input, add, Conv3D, GaussianNoise, concatenate
from keras.layers import ConvLSTM2D, BatchNormalization, TimeDistributed, Add
from keras.models import Model

warnings.filterwarnings("ignore")
%matplotlib inline


OBJECT_DETECTION_SPEED = "fastest"
PRETRAINED_MODEL_PATH = "/kaggle/input/imageai/resnet50_coco_best_v2.0.1.h5"
FONT = cv2.FONT_HERSHEY_SIMPLEX
IMG_SIZE = 128
FRAME_BATCH_SIZE = 4
THRESHOLD_DIFF_TOLERANCE = 80
batch_size = 8
EPOCHS = 5
Using TensorFlow backend.
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(PRETRAINED_MODEL_PATH)
detector.loadModel(detection_speed = OBJECT_DETECTION_SPEED) #change parameter to adjust accuracy and speed
custom = detector.CustomObjects(person=True)
train_dir = "/kaggle/input/violence-final-2/welp"
val_dir = "/kaggle/input/moviesviolencenonviolence/movies/"

total_number_frames_train = 0
total_number_frames_valid = 0
def get_boxes(frame):
    _, detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom,
        input_type="array",
        input_image= frame,
        output_type="array"
        )
    return detections
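
As the traceback at the bottom of this post shows, detectCustomObjectsFromImage wraps whatever fails internally in the generic ValueError from the title. Purely as an illustration (the wrapper name is hypothetical), a defensive variant that treats a detector failure as "no detections" instead of crashing the generator could look like this:

def get_boxes_safe(frame):
    # Identical call to get_boxes above, but a failed detection returns an
    # empty list instead of propagating ImageAI's generic ValueError.
    try:
        _, detections = detector.detectCustomObjectsFromImage(
            custom_objects=custom,
            input_type="array",
            input_image=frame,
            output_type="array"
            )
        return detections
    except ValueError:
        return []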

def frame_preprocessing(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.resize(frame, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)

    return frame

def get_frame_difference(frame_1, frame_2):
    frame = cv2.absdiff(frame_1, frame_2)
    ret,thresh1 = cv2.threshold(frame,THRESHOLD_DIFF_TOLERANCE,255,cv2.THRESH_BINARY)   # THRESHOLD_DIFF_TOLERANCE (80) is the binarization threshold
    return thresh1

def mask_frame(frame, detections, detections_temp):
    mask = np.zeros(frame.shape, dtype=np.uint8)

    for eachObject in detections:
        x1,y1,x2,y2 = eachObject["box_points"]
        mask = cv2.rectangle(mask, (x1, y1),(x2,y2), (255,255,255), -1) 

    for eachObject in detections_temp:
        x1,y1,x2,y2 = eachObject["box_points"]
        mask = cv2.rectangle(mask, (x1, y1),(x2,y2), (255,255,255), -1) 

    result = cv2.bitwise_and(frame, mask)   # Mask input image with binary mask
    result[mask==0] = 255   # Optional : set background -> now white/ by default black

    return result
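
To make the data flow concrete before the generator below, here is a small usage sketch (the video path is hypothetical) that chains these helpers on one pair of consecutive frames, the same way my_generater does:

vidcap = cv2.VideoCapture("some_video.mp4")   # hypothetical path
_, f1 = vidcap.read()
_, f2 = vidcap.read()
f1 = cv2.resize(f1, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)
f2 = cv2.resize(f2, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)
d1 = get_boxes(f1)                            # person boxes in each frame
d2 = get_boxes(f2)
f1 = cv2.cvtColor(mask_frame(f1, d1, []), cv2.COLOR_BGR2GRAY)
f2 = cv2.cvtColor(mask_frame(f2, d2, d1), cv2.COLOR_BGR2GRAY)
diff = get_frame_difference(f2, f1) / 255     # binary motion map, values in {0, 1}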
            
def my_generater(batch_size , in_dir, videos_per_category):           
    
    total_number_frames_train = 0
    total_number_frames_valid = 0

    list_fight=os.listdir(os.path.join(in_dir,"Violence"))
    list_no_fight=os.listdir(os.path.join(in_dir,"NonViolence"))

    fight_final=random.sample(list_fight, videos_per_category)
    no_fight_final=random.sample(list_no_fight,videos_per_category)

    fight_labels = []
    no_fight_labels = []

    for i in range (videos_per_category):
        fight_labels.append([1,0])
        no_fight_labels.append([0,1])

    final = fight_final + no_fight_final
    labels = fight_labels + no_fight_labels

    c = list(zip(final,labels))
    random.shuffle(c)
    names, labels = zip(*c)
    
    images_batches=[]
    labelss=[]
    counter = 0
    
#     print("check")
    while True:
        for i in range(len(names)): ##no. of videos loop
            
#             print("check1")
            if labels[i]==[1,0]:
                in_file = os.path.join(in_dir,"Violence")
            else:
                in_file = os.path.join(in_dir,"NonViolence")
                
            in_file = os.path.join(in_file, names[i])
            vidcap = cv2.VideoCapture(in_file)
            length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))  #frames in a video
            if in_dir == train_dir:   # count frames for train vs. validation by comparing the configured paths
                total_number_frames_train = total_number_frames_train + length
            else:
                total_number_frames_valid = total_number_frames_valid + length
        
            for j in range(int(length/FRAME_BATCH_SIZE)):
                detections_temp_2=[]
                success,frame_temp = vidcap.read()
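                # NOTE: `success` is never checked here (nor in the inner loop
                # below); at end of stream vidcap.read() returns (False, None),
                # and passing None to cv2.resize raises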
                frame_temp = cv2.resize(frame_temp, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)
                detections_temp = get_boxes(frame_temp) 
                frame_temp = mask_frame(frame_temp, detections_temp, detections_temp_2)                    
                frame_temp = cv2.cvtColor(frame_temp, cv2.COLOR_BGR2GRAY)
                
                images_frame_batches=[]

                for k in range(FRAME_BATCH_SIZE - 1):
                    
                    success,frame = vidcap.read()
                    frame = cv2.resize(frame, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)
                    detections = get_boxes(frame)                   
                    frame = mask_frame(frame, detections, detections_temp)   
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   
                    diff = get_frame_difference(frame, frame_temp)
                    diff = diff/255
                    images_frame_batches.append(diff)
                    
                    frame_temp = frame
                    detections_temp=detections

                if counter < batch_size:
                    images_batches.append(images_frame_batches)
                    counter = counter + 1
                    labelss.append(labels[i])
                    

                else:
                    yield np.array(images_batches).reshape((batch_size, FRAME_BATCH_SIZE-1, IMG_SIZE, IMG_SIZE, 1)), np.array(labelss)
                    labelss=[]
                    images_batches=[]
                    counter = 0
        # no break here: fit_generator needs the generator to yield indefinitely,
        # so the while-True loop keeps repeating the pass over the sampled videos
  
gen = my_generater(2, train_dir, 800)
videos, next_frame = next(gen)

print(np.array(videos).shape)
np.array(next_frame).shape
(2, 3, 128, 128, 1)
(2, 2)
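# i.e. a batch of 2 clips, each clip FRAME_BATCH_SIZE - 1 = 3 difference maps of
# 128x128x1, with a one-hot [fight, no-fight] label per clip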

#gets total frames combined of all videos
total_number_frames_train = get_total_frames(train_dir, 800)
total_number_frames_valid = get_total_frames(val_dir, 100)

print(total_number_frames_train)
print(total_number_frames_valid)
224108
18283
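
get_total_frames is the single omitted function mentioned at the top; per the comment above, it only sums the frame counts of all sampled videos. A plausible reconstruction, assuming the same Violence/NonViolence folder layout and OpenCV frame counting the generator uses:

def get_total_frames(in_dir, videos_per_category):
    # Sum cv2.CAP_PROP_FRAME_COUNT over the sampled videos in both category
    # folders, mirroring the counting inside my_generater.
    total = 0
    for category in ("Violence", "NonViolence"):
        folder = os.path.join(in_dir, category)
        for name in random.sample(os.listdir(folder), videos_per_category):
            vidcap = cv2.VideoCapture(os.path.join(folder, name))
            total += int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            vidcap.release()
    return total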
steps_per_epoch = total_number_frames_train // (batch_size * FRAME_BATCH_SIZE)
validation_steps = total_number_frames_valid // (batch_size * FRAME_BATCH_SIZE)

print(steps_per_epoch)
print(validation_steps)
7003
571
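# i.e. 224108 frames // (8 clips per batch * 4 frames per clip) = 7003 train steps,
# and 18283 // 32 = 571 validation steps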
inp = Input((FRAME_BATCH_SIZE - 1,IMG_SIZE,IMG_SIZE,1))
permuted = Permute((2,3,4,1))(inp)
noise = GaussianNoise(0.1)(permuted)
c=4
x = Permute((4,1,2,3))(noise)
x =(ConvLSTM2D(filters=c, kernel_size=(3,3),padding='same',name='conv_lstm1', return_sequences=True))(x)

c1=(BatchNormalization())(x)
x = Dropout(0.2)(x)
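# NOTE: this Dropout output is discarded -- the next layer consumes c1, not x
# (the same pattern repeats after conv_lstm2 below)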
x =(TimeDistributed(MaxPooling2D(pool_size=(2,2))))(c1)

x =(ConvLSTM2D(filters=2*c,kernel_size=(3,3),padding='same',name='conv_lstm2',return_sequences=True))(x)
c2=(BatchNormalization())(x)
x = Dropout(0.2)(x)

x =(TimeDistributed(MaxPooling2D(pool_size=(2,2))))(c2)
x =(ConvLSTM2D(filters=4*c,kernel_size=(3,3),padding='same',name='conv_lstm3',return_sequences=True))(x)

x =(TimeDistributed(UpSampling2D(size=(2, 2))))(x)
x =(ConvLSTM2D(filters=4*c,kernel_size=(3,3),padding='same',name='conv_lstm4',return_sequences=True))(x)
x =(BatchNormalization())(x)

x =(ConvLSTM2D(filters=2*c,kernel_size=(3,3),padding='same',name='conv_lstm5',return_sequences=True))(x)
x =(BatchNormalization())(x)
x = Add()([c2, x])
x = Dropout(0.2)(x)

x =(TimeDistributed(UpSampling2D(size=(2, 2))))(x)
x =(ConvLSTM2D(filters=c,kernel_size=(3,3),padding='same',name='conv_lstm6',return_sequences=False))(x)
x =(BatchNormalization())(x)

x = (Flatten())(x)
x = (Dense(units=50, activation='relu'))(x)

x = (Dense(units=2, activation='relu'))(x)

model=Model(inputs=[inp], outputs=[x])

model.summary()   # works, but I have deleted the output due to the character limit

   

Total params: 3,322,408
Trainable params: 3,322,328
Non-trainable params: 80
__________________________________________________________________________________________________
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
!pip install tensorflow-gpu==1.14.0
import tensorflow as tf
Collecting tensorflow-gpu==1.14.0
  Downloading https://files.pythonhosted.org/packages/76/04/43153bfdfcf6c9a4c38ecdb971ca9a75b9a791bb69a764d652c359aca504/tensorflow_gpu-1.14.0-cp36-cp36m-manylinux1_x86_64.whl (377.0MB)
    100% |████████████████████████████████| 377.0MB 113kB/s 
Requirement already satisfied: six>=1.10.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.12.0)
Collecting google-pasta>=0.1.6 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/a3/de/c648ef6835192e6e2cc03f40b19eeda4382c49b5bafb43d88b931c4c74ac/google_pasta-0.2.0-py3-none-any.whl (57kB)
    100% |████████████████████████████████| 61kB 6.6MB/s 
Collecting tensorboard<1.15.0,>=1.14.0 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/91/2d/2ed263449a078cd9c8a9ba50ebd50123adf1f8cfbea1492f9084169b89d9/tensorboard-1.14.0-py3-none-any.whl (3.1MB)
    100% |████████████████████████████████| 3.2MB 13.3MB/s 
Requirement already satisfied: keras-applications>=1.0.6 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.0.7)
Collecting tensorflow-estimator<1.15.0rc0,>=1.14.0rc0 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/3c/d5/21860a5b11caf0678fbc8319341b0ae21a07156911132e0e71bffed0510d/tensorflow_estimator-1.14.0-py2.py3-none-any.whl (488kB)
    100% |████████████████████████████████| 491kB 24.4MB/s 
Requirement already satisfied: grpcio>=1.8.6 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.18.0)
Requirement already satisfied: absl-py>=0.7.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.7.0)
Requirement already satisfied: numpy<2.0,>=1.14.5 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.16.1)
Requirement already satisfied: termcolor>=1.1.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.1.0)
Requirement already satisfied: wheel>=0.26 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.31.1)
Requirement already satisfied: gast>=0.2.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.2.2)
Requirement already satisfied: keras-preprocessing>=1.0.5 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.0.9)
Requirement already satisfied: protobuf>=3.6.1 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (3.6.1)
Requirement already satisfied: astor>=0.6.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.7.1)
Collecting wrapt>=1.11.1 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/82/f7/e43cefbe88c5fd371f4cf0cf5eb3feccd07515af9fd6cf7dbf1d1793a797/wrapt-1.12.1.tar.gz
Requirement already satisfied: markdown>=2.6.8 in /opt/conda/lib/python3.6/site-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow-gpu==1.14.0) (3.0.1)
Requirement already satisfied: werkzeug>=0.11.15 in /opt/conda/lib/python3.6/site-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow-gpu==1.14.0) (0.14.1)
Collecting setuptools>=41.0.0 (from tensorboard<1.15.0,>=1.14.0->tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/44/a6/7fb6e8b3f4a6051e72e4e2218889351f0ee484b9ee17e995f5ccff780300/setuptools-50.3.0-py3-none-any.whl (785kB)
    100% |████████████████████████████████| 788kB 20.3MB/s 
Requirement already satisfied: h5py in /opt/conda/lib/python3.6/site-packages (from keras-applications>=1.0.6->tensorflow-gpu==1.14.0) (2.9.0)
Building wheels for collected packages: wrapt
  Running setup.py bdist_wheel for wrapt ... - \ | / done
  Stored in directory: /root/.cache/pip/wheels/b1/c2/ed/d62208260edbd3fa7156545c00ef966f45f2063d0a84f8208a
Successfully built wrapt
thinc 6.12.1 has requirement wrapt<1.11.0,>=1.10.0, but you'll have wrapt 1.12.1 which is incompatible.
tensorflow 1.12.0 has requirement tensorboard<1.13.0,>=1.12.0, but you'll have tensorboard 1.14.0 which is incompatible.
pytest-cov 2.6.1 has requirement pytest>=3.6, but you'll have pytest 3.5.1 which is incompatible.
anaconda-client 1.6.14 has requirement python-dateutil>=2.6.1, but you'll have python-dateutil 2.6.0 which is incompatible.
Installing collected packages: google-pasta, setuptools, tensorboard, tensorflow-estimator, wrapt, tensorflow-gpu
  Found existing installation: setuptools 39.1.0
    Uninstalling setuptools-39.1.0:
      Successfully uninstalled setuptools-39.1.0
  Found existing installation: tensorboard 1.12.2
    Uninstalling tensorboard-1.12.2:
      Successfully uninstalled tensorboard-1.12.2
  Found existing installation: wrapt 1.10.11
Cannot uninstall 'wrapt'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
You are using pip version 18.1, however version 20.2.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
# device_name = tf.test.gpu_device_name()
if tf.test.gpu_device_name():
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
    print("Please install GPU version of TF")
Default GPU Device: /device:GPU:0
filepath = "/kaggle/working/saved-model-{epoch:02d}-{val_acc:.2f}.hdf5"

checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=False, mode='max')
config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 56} ) 
sess = tf.Session(config=config) 
tf.keras.backend.set_session(sess)
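# NOTE: the model above was built with standalone Keras, so K.set_session(sess)
# (via the `from keras import backend as K` import earlier) is what would attach
# this session to it; tf.keras.backend.set_session configures tf.keras instead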
model.fit_generator(my_generater(batch_size, train_dir, 800),
                    steps_per_epoch=steps_per_epoch//4,
                    epochs=EPOCHS,
                    validation_steps=validation_steps//4,
                    validation_data=my_generater(batch_size, val_dir, 100),
                    callbacks = [checkpoint])
Epoch 1/5
1750/1750 [==============================] - 4570s 3s/step - loss: 0.5375 - acc: 0.5976 - val_loss: 0.5000 - val_acc: 0.5599

Epoch 00001: saving model to /kaggle/working/saved-model-01-0.56.hdf5
Epoch 2/5
   5/1750 [..............................] - ETA: 3:58 - loss: 0.5000 - acc: 0.3000
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
/kaggle/working/imageai/Detection/__init__.py in detectCustomObjectsFromImage(self, custom_objects, input_image, output_image_path, input_type, output_type, extract_detected_objects, minimum_percentage_probability, display_percentage_probability, display_object_name)
    694                     model = self.__model_collection[0]
--> 695                     _, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))
    696                     predicted_numbers = np.argmax(detections[0, :, 4:], axis=1)

/opt/conda/lib/python3.6/site-packages/keras/engine/training.py in predict_on_batch(self, x)
   1273         self._make_predict_function()
-> 1274         outputs = self.predict_function(ins)
   1275         return unpack_singleton(outputs)

/opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:

/opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]

/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
    527             compat.as_text(c_api.TF_Message(self.status.status)),
--> 528             c_api.TF_GetCode(self.status.status))
    529     # Delete the underlying status object from memory otherwise it stays alive

InvalidArgumentError: indices[0] = 78 is not in [0, 0)
     [[{{node nms/embedding_lookup_52}} = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](nms/Cast_17, nms/non_max_suppression_17/NonMaxSuppressionV3, nms/embedding_lookup_52/axis)]]

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
<ipython-input-19-37687fd66644> in <module>()
      4                     validation_steps=validation_steps//4,
      5                     validation_data=my_generater(batch_size, val_dir, 100),
----> 6                     callbacks = [checkpoint])

/opt/conda/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

/opt/conda/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

/opt/conda/lib/python3.6/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    179             batch_index = 0
    180             while steps_done < steps_per_epoch:
--> 181                 generator_output = next(output_generator)
    182 
    183                 if not hasattr(generator_output, '__len__'):

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in get(self)
    707                     "`use_multiprocessing=False, workers > 1`."
    708                     "For more information see issue #1638.")
--> 709             six.reraise(*sys.exc_info())

/opt/conda/lib/python3.6/site-packages/six.py in reraise(tp, value, tb)
    691             if value.__traceback__ is not tb:
    692                 raise value.with_traceback(tb)
--> 693             raise value
    694         finally:
    695             value = None

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in get(self)
    683         try:
    684             while self.is_running():
--> 685                 inputs = self.queue.get(block=True).get()
    686                 self.queue.task_done()
    687                 if inputs is not None:

/opt/conda/lib/python3.6/multiprocessing/pool.py in get(self, timeout)
    642             return self._value
    643         else:
--> 644             raise self._value
    645 
    646     def _set(self, i, obj):

/opt/conda/lib/python3.6/multiprocessing/pool.py in worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
    117         job, i, func, args, kwds = task
    118         try:
--> 119             result = (True, func(*args, **kwds))
    120         except Exception as e:
    121             if wrap_exception and func is not _helper_reraises_exception:

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in next_sample(uid)
    624         The next value of generator `uid`.
    625     """
--> 626     return six.next(_SHARED_SEQUENCES[uid])
    627 
    628 

<ipython-input-8-1e63801f9903> in my_generater(batch_size, in_dir, videos_per_category)
     65                     success,frame = vidcap.read()
     66                     frame = cv2.resize(frame, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)
---> 67                     detections = get_boxes(frame)
     68                     frame = mask_frame(frame, detections, detections_temp)
     69                     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

<ipython-input-6-a5006495da23> in get_boxes(frame)
      4         input_type="array",
      5         input_image= frame,
----> 6         output_type="array"
      7         )
      8     return detections

/kaggle/working/imageai/Detection/__init__.py in detectCustomObjectsFromImage(self, custom_objects, input_image, output_image_path, input_type, output_type, extract_detected_objects, minimum_percentage_probability, display_percentage_probability, display_object_name)
    884             except:
    885                 raise ValueError(
--> 886                     "Ensure you specified correct input image, input type, output type and/or output image path ")
    887 
    888 

ValueError: Ensure you specified correct input image, input type, output type and/or output image path