Python: A target array with shape (5, 75, 100, 10) was passed for an output of shape (None, 74, 100, 10) while using `mean_squared_error` as loss

Tags: python, numpy, tensorflow, machine-learning, conda

I am not sure how to go about fixing this error. It comes from a Jupyter notebook that I copied into a Python file.

Here is the saved Jupyter notebook, which also contains the error:

Here is the Python code:

(/scratch3/3d_pose/DeepPoseKitEnv) [jalal@goku examples]$ cat dlc_train.py 
import sys
import tensorflow as tf
print(tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
import glob

from deepposekit.io import TrainingGenerator, DLCDataGenerator
from deepposekit.augment import FlipAxis
import imgaug.augmenters as iaa
import imgaug as ia

from deepposekit.models import (StackedDenseNet,
                                DeepLabCut,
                                StackedHourglass,
                                LEAP)
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping

from deepposekit.callbacks import Logger, ModelCheckpoint
from deepposekit.models import load_model

import time
from os.path import expanduser

try:
    import google.colab
    IN_COLAB = True
except:
    IN_COLAB = False

data_generator = DLCDataGenerator(
    project_path='/scratch3/3d_pose/animalpose/experiments/moth-filtered-Mona-2019-12-06_95p_DONE/'
)

print(data_generator.dlcconfig)

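# Note (added for clarity; based on the DeepPoseKit example notebooks, so treat as an
# assumption): `graph` gives the index of each keypoint's parent in the posture graph
# (-1 means no parent), and `swap_index` gives the keypoint each one is swapped with
# when FlipAxis mirrors the image (-1 means no swap partner).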
data_generator.graph = np.array([-1, 0, 0, 0])

data_generator.swap_index = np.array([-1, 2, 1, -1])


image, keypoints = data_generator[0]

plt.figure(figsize=(5,5))
image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]
cmap = None if image.shape[-1] == 3 else 'gray'
plt.imshow(image, cmap=cmap, interpolation='none')
for idx, jdx in enumerate(data_generator.graph):
    if jdx > -1:
        plt.plot(
            [keypoints[0, idx, 0], keypoints[0, jdx, 0]],
            [keypoints[0, idx, 1], keypoints[0, jdx, 1]],
            'r-'
        )

plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
plt.xlim(0, data_generator.image_shape[1])
plt.ylim(0, data_generator.image_shape[0])

plt.show()


augmenter = []

augmenter.append(FlipAxis(data_generator, axis=0))  # flip image up-down
augmenter.append(FlipAxis(data_generator, axis=1))  # flip image left-right 

sometimes = []
sometimes.append(iaa.Affine(scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
                            translate_percent={'x': (-0.5, 0.5), 'y': (-0.5, 0.5)},
                            shear=(-8, 8),
                            order=ia.ALL,
                            cval=ia.ALL)
                 )
sometimes.append(iaa.Affine(scale=(0.5, 1.5),
                            order=ia.ALL,
                            cval=ia.ALL)
                 )
augmenter.append(iaa.Sometimes(0.5, sometimes))
augmenter.append(iaa.Sometimes(0.5, iaa.Affine(rotate=(-180, 180),
                            order=ia.ALL,
                            cval=ia.ALL))
                 )
augmenter = iaa.Sequential(augmenter)







image, keypoints = data_generator[0]
image, keypoints = augmenter(images=image, keypoints=keypoints)
plt.figure(figsize=(5,5))
image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]
cmap = None if image.shape[-1] == 3 else 'gray'
plt.imshow(image, cmap=cmap, interpolation='none')
for idx, jdx in enumerate(data_generator.graph):
    if jdx > -1:
        plt.plot(
            [keypoints[0, idx, 0], keypoints[0, jdx, 0]],
            [keypoints[0, idx, 1], keypoints[0, jdx, 1]],
            'r-'
        )

plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
plt.xlim(0, data_generator.image_shape[1])
plt.ylim(0, data_generator.image_shape[0])

plt.show()


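# Note (added for clarity; hedged, based on the DeepPoseKit docs): downsample_factor
# sets the resolution of the confidence-map targets (roughly image size // 2**downsample_factor,
# so 8x downsampling here), and sigma is the standard deviation of the Gaussian drawn
# around each keypoint. The (75, 100) spatial size of the target array in the error
# message appears to be produced by this generator.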
train_generator = TrainingGenerator(generator=data_generator,
                                    downsample_factor=3,
                                    augmenter=augmenter,
                                    sigma=5,
                                    validation_split=0.1,
                                    use_graph=True,
                                    random_seed=1,
                                    graph_scale=1)
train_generator.get_config()


n_keypoints = data_generator.keypoints_shape[0]
batch = train_generator(batch_size=1, validation=False)[0]
inputs = batch[0]
outputs = batch[1]

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10,10))
ax1.set_title('image')
ax1.imshow(inputs[0,...,0], cmap='gray', vmin=0, vmax=255)

ax2.set_title('posture graph')
ax2.imshow(outputs[0,...,n_keypoints:-1].max(-1))

ax3.set_title('keypoints confidence')
ax3.imshow(outputs[0,...,:n_keypoints].max(-1))

ax4.set_title('posture graph and keypoints confidence')
ax4.imshow(outputs[0,...,-1], vmin=0)
plt.show()

train_generator.on_epoch_end()


from deepposekit.models import DeepLabCut, StackedDenseNet, LEAP

#model = StackedDenseNet(train_generator, n_stacks=1, growth_rate=32, pretrained=True)
#model = DeepLabCut(train_generator, backbone="resnet50")
#model = DeepLabCut(train_generator, backbone="mobilenetv2", alpha=1.0) # Increase alpha to improve accuracy
model = DeepLabCut(train_generator, backbone="densenet121")
#model = LEAP(train_generator)
model.get_config()
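# Note (added for clarity): this DeepLabCut/densenet121 model is the one whose output
# shape, (None, 74, 100, 10), appears in the error message. Its 10 output channels match
# the 10 channels of the TrainingGenerator targets; only the height differs (74 vs. 75).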


data_size = (500,) + data_generator.image_shape
x = np.random.randint(0, 255, data_size, dtype="uint8")
y = model.predict(x[:100], batch_size=50) # make sure the model is in GPU memory
t0 = time.time()
y = model.predict(x, batch_size=50, verbose=1)
t1 = time.time()
print(x.shape[0] / (t1 - t0))  # prediction speed in images per second


logger = Logger(validation_batch_size=10
    # filepath saves the logger data to a .h5 file
    # filepath=HOME + "/deeplabcut_log_dlcdensenet.h5", validation_batch_size=10
)



reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.2, verbose=1, patience=20)


model_checkpoint = ModelCheckpoint(
    "../../deeplabcut_best_model_dlcdensenet_moth.h5",
    monitor="val_loss",
    # monitor="loss" # use if validation_split=0
    verbose=1,
    save_best_only=True,
    optimizer=True, # Set this to True if you wish to resume training from a saved model
)

early_stop = EarlyStopping(
    monitor="val_loss",
    # monitor="loss" # use if validation_split=0
    min_delta=0.001,
    patience=100,
    verbose=1
)



callbacks = [early_stop, reduce_lr, model_checkpoint, logger]


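# Note (added for clarity): the ValueError quoted in the title (target array of shape
# (5, 75, 100, 10) vs. output of shape (None, 74, 100, 10)) is presumably raised by this
# fit() call, when Keras compares the generator's targets with the model's output tensor.
# The batch_size=5 below matches the leading 5 in the reported target shape.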
model.fit(
    batch_size=5,
    validation_batch_size=10,
    callbacks=callbacks,
    #epochs=1000, # Increase the number of epochs to train the model longer
    epochs=100,
    n_workers=8,
    steps_per_epoch=200,
)


model = load_model(
    "../../deeplabcut_best_model_dlcdensenet_moth.h5",
    augmenter=augmenter,
    generator=data_generator,
)


model.fit(
    batch_size=5,
    validation_batch_size=10,
    callbacks=callbacks,
    #epochs=1000, # Increase the number of epochs to train the model longer
    epochs=100,
    n_workers=8,
    steps_per_epoch=200,
)
Do these shape values make sense to you? Why does one have 74 while the other has 75?

(From the comments: `None` is specific to TensorFlow. Please try to provide one. It is not my code, @hpaulj, it is the code from the Jupyter notebook that I copied into a Python file. @Ente, check the updated first line of the question. @Ente, could you take a look?)
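For illustration only (this is a minimal sketch, not the DeepLabCut/DenseNet121 model from the script, and I cannot confirm it is the exact mechanism inside DeepPoseKit): an odd spatial dimension passed through a stride-2 downsampling layer is floored, and upsampling by 2 afterwards does not restore it, which yields exactly the 75-versus-74 off-by-one and the same Keras error:

import numpy as np
from tensorflow.keras import layers, models

# Toy network: downsample by 2, then upsample by 2.
inputs = layers.Input(shape=(75, 100, 1))
x = layers.MaxPooling2D(2)(inputs)    # 75 -> floor(75 / 2) = 37
x = layers.UpSampling2D(2)(x)         # 37 -> 74, not 75
outputs = layers.Conv2D(10, 1)(x)     # output shape: (None, 74, 100, 10)
model = models.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mean_squared_error")

images = np.zeros((5, 75, 100, 1), dtype="float32")
targets = np.zeros((5, 75, 100, 10), dtype="float32")  # height 75, but the model outputs 74
model.fit(images, targets, batch_size=5)
# ValueError: A target array with shape (5, 75, 100, 10) was passed for an output of
# shape (None, 74, 100, 10) while using as loss `mean_squared_error`. ...

Whether the same arithmetic happens somewhere inside the densenet121 backbone's down- and upsampling path for these 75x100 maps is something I cannot verify from the post alone, but it would explain why the model output ends up one row shorter than the TrainingGenerator targets.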
(/scratch3/3d_pose/DeepPoseKitEnv) [jalal@goku examples]$ pip list
Package                Version            
---------------------- -------------------
absl-py                0.8.1              
astor                  0.8.0              
attrs                  19.3.0             
backcall               0.1.0              
bleach                 3.1.0              
certifi                2019.11.28         
chardet                3.0.4              
Click                  7.0                
cycler                 0.10.0             
decorator              4.4.1              
deeplabcut             2.1.4              
deepposekit            0.3.4              
defusedxml             0.6.0              
easydict               1.9                
entrypoints            0.3                
gast                   0.2.2              
google-pasta           0.1.8              
grpcio                 1.25.0             
h5py                   2.10.0             
idna                   2.8                
imageio                2.6.1              
imageio-ffmpeg         0.3.0              
imgaug                 0.3.0              
importlib-metadata     1.2.0              
intel-openmp           2020.0.133         
ipykernel              5.1.3              
ipython                7.10.1             
ipython-genutils       0.2.0              
ipywidgets             7.5.1              
jedi                   0.15.1             
Jinja2                 2.10.3             
joblib                 0.14.0             
jsonschema             3.2.0              
jupyter                1.0.0              
jupyter-client         5.3.4              
jupyter-console        6.0.0              
jupyter-core           4.6.1              
Keras-Applications     1.0.8              
Keras-Preprocessing    1.1.0              
kiwisolver             1.1.0              
Markdown               3.1.1              
MarkupSafe             1.1.1              
matplotlib             3.0.3              
mistune                0.8.4              
mock                   3.0.5              
more-itertools         8.0.2              
moviepy                1.0.1              
msgpack                0.6.2              
msgpack-numpy          0.4.4.3            
nbconvert              5.6.1              
nbformat               4.4.0              
networkx               2.4                
notebook               6.0.2              
numexpr                2.7.0              
numpy                  1.17.4             
opencv-python          3.4.5.20           
opencv-python-headless 4.1.2.30           
opt-einsum             3.1.0              
pandas                 0.25.3             
pandocfilters          1.4.2              
parso                  0.5.1              
patsy                  0.5.1              
pexpect                4.7.0              
pickleshare            0.7.5              
Pillow                 6.2.1              
pip                    19.3.1             
proglog                0.1.9              
prometheus-client      0.7.1              
prompt-toolkit         3.0.2              
protobuf               3.11.1             
psutil                 5.6.7              
ptyprocess             0.6.0              
Pygments               2.5.2              
pyparsing              2.4.5              
Pypubsub               4.0.3              
pyrsistent             0.15.6             
python-dateutil        2.8.1              
pytz                   2019.3             
PyWavelets             1.1.1              
PyYAML                 5.2                
pyzmq                  18.1.1             
qtconsole              4.6.0              
requests               2.22.0             
ruamel.yaml            0.16.5             
ruamel.yaml.clib       0.2.0              
scikit-image           0.16.2             
scikit-learn           0.22               
scipy                  1.3.3              
Send2Trash             1.5.0              
setuptools             42.0.2.post20191203
Shapely                1.6.4.post2        
six                    1.13.0             
statsmodels            0.10.1             
tables                 3.4.3              
tabulate               0.8.6              
tensorboard            1.15.0             
tensorflow-estimator   1.15.1             
tensorflow-gpu         1.15.0             
tensorpack             0.9.8              
termcolor              1.1.0              
terminado              0.8.3              
testpath               0.4.4              
tornado                6.0.3              
tqdm                   4.40.1             
traitlets              4.3.3              
urllib3                1.25.7             
wcwidth                0.1.7              
webencodings           0.5.1              
Werkzeug               0.16.0             
wheel                  0.33.6             
widgetsnbextension     3.5.1              
wrapt                  1.11.2             
wxPython               4.0.3              
zipp                   0.6.0   