Tensorflow tensor shape values

tensorflow, keras, tensorflow2.0, reshape, tf.keras

I am running into a strange error when using tensorflow.keras.layers.Reshape. Where does it get the value 47409408 from? 207936 corresponds to the correct size (69312 * 3).

One odd aspect is that if I put a Flatten layer before the Reshape, it works.

Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 304, 228, 3)       30        
_________________________________________________________________
reshape (Reshape)            (None, 69312, 3)          0         
=================================================================
Total params: 30
Trainable params: 30
Non-trainable params: 0
_________________________________________________________________
(0) Invalid argument: Input to reshape is a tensor with 207936 values, but the requested shape has 47409408 values
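For reference, a minimal standalone sketch of the two layers from the summary above (with the input shape fixed to (304, 228, 1), as in the summary); built this way, the static shapes line up and an explicitly batched dummy input reshapes without error:

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

# Rebuild just the Conv2D -> Reshape pair from the summary above.
m = models.Sequential([
    tf.keras.Input(shape=(304, 228, 1)),
    layers.Conv2D(3, (3, 3), padding='same'),  # -> (None, 304, 228, 3)
    layers.Reshape((69312, 3)),                # 304 * 228 * 3 == 69312 * 3 == 207936
    # Adding layers.Flatten() before the Reshape gives the same static output shape.
])
m.summary()

# A dummy input with an explicit batch dimension goes through cleanly.
out = m(np.zeros((1, 304, 228, 1), dtype=np.float32))
print(out.shape)  # (1, 69312, 3)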


Could you also post your code? You may be passing an input with an incorrect shape to the network, but without the rest of the code (model creation, the model call, and data preparation) it is hard to tell.
47409408 could be a multiple of your conv2d layer's output: (304*228*3)*228. What is your input size?
Updated. Also note the remark about how it works with a Flatten layer before the Reshape. @SoheilStar @BraulioBarahona, the input size is (batch, 304, 228, 1)
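A quick arithmetic check of that observation (plain Python, nothing model-specific):

values_per_sample = 304 * 228 * 3     # Conv2D output for one sample: 207936
reshape_target = 69312 * 3            # values requested by Reshape per sample: 207936
print(values_per_sample == reshape_target)    # True, so the per-sample sizes do match
print(47409408 // values_per_sample)          # 228, i.e. the reported size is 228 x one sample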
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from PIL import Image
from tensorflow.keras import datasets, layers, models, preprocessing
import os
from natsort import natsorted
from tensorflow.keras.models import Model

BATCH_SIZE = 32
EPOCHS = 15
LEARNING_RATE = 1e-4

#jpegs with values from 0 to 255
img_dir = ".../normalized_imgs"
# .npy files of size (69312,3)
pts_dir = ".../normalized_pts"
img_files = [os.path.join(img_dir, f)
             for f in natsorted(os.listdir(img_dir))]

pts_files = [os.path.join(pts_dir, f)
             for f in natsorted(os.listdir(pts_dir))]

img = Image.open(img_files[0])
pts = np.load(pts_files[0])

def parse_img_input(img_file, pts_file):
    def _parse_input(img_file, pts_file):
        # get image
        d_filepath = img_file.numpy().decode()
        d_image_decoded = tf.image.decode_jpeg(tf.io.read_file(d_filepath), channels=1)
        d_image = tf.cast(d_image_decoded, tf.float32) / 255.0

        # get numpy data
        pts_filepath = pts_file.numpy().decode()
        pts = np.load(pts_filepath, allow_pickle=True)

        print("d_image ", d_image.shape)
        return d_image, pts
    return tf.py_function(_parse_input,
                          inp=[img_file, pts_file],
                          Tout=[tf.float32, tf.float32])

class SimpleCNN(Model):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        input_shape = (img.size[0], img.size[1], 1)
        self.model = model = models.Sequential()
        model.add(tf.keras.Input(shape=input_shape))
        model.add(layers.Conv2D(3, (3, 3), padding='same'))
        model.add(layers.Reshape((pts.shape[0], pts.shape[1])))

    def call(self, x):
        # forward pass: delegate to the inner Sequential model
        return self.model(x)


# split input data into train, test sets
X_train_file, X_test_file, y_train_file, y_test_file = train_test_split(img_files, pts_files, 
                                                    test_size=0.2, 
                                                    random_state=0)

model = SimpleCNN()

dataset_train = tf.data.Dataset.from_tensor_slices((X_train_file, y_train_file))
dataset_train = dataset_train.map(parse_img_input)

dataset_test = tf.data.Dataset.from_tensor_slices((X_test_file, y_test_file))
dataset_test = dataset_test.map(parse_img_input)

model.compile(optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), loss= tf.losses.MeanSquaredError(), metrics= [tf.keras.metrics.get('accuracy')])
model.fit(dataset_train, epochs=EPOCHS, shuffle=True, validation_data= dataset_test)
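Since the comments hinge on the shape the pipeline actually delivers, a quick way to check it (a sketch reusing the dataset_train defined above) is to pull one element and print its runtime shape; the dataset's static element shapes are unknown because parse_img_input goes through tf.py_function, so only a runtime check shows what model.fit really receives:

# Inspect what the input pipeline actually yields at runtime.
print(dataset_train.element_spec)       # shapes show as <unknown> because of tf.py_function
for d_image, d_pts in dataset_train.take(1):
    print("image:", d_image.shape)      # per-example shape; note no .batch() is applied above
    print("points:", d_pts.shape)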