Python 3.x ValueError尝试将值(类型模型)转换为张量
我正在尝试使用tensorflow 2训练一个模型 我收到错误信息:Python 3.x ValueError尝试将值(类型模型)转换为张量,python-3.x,tensorflow,tensorflow2.0,Python 3.x,Tensorflow,Tensorflow2.0,我正在尝试使用tensorflow 2训练一个模型 我收到错误信息: ValueError: Attempt to convert a value (<tensorflow.python.keras.engine.training.Model object at 0x7f1ab822ecc0>) with an unsupported type (<class 'tensorflow.python.keras.engine.training.Model'>) to a T
ValueError: Attempt to convert a value (<tensorflow.python.keras.engine.training.Model object at 0x7f1ab822ecc0>) with an unsupported type (<class 'tensorflow.python.keras.engine.training.Model'>) to a Tensor.
如果我不使用该类,而是运行:
# Functional-API model: 3 inputs -> Dense(2, relu) -> Dense(1).
inputs = tf.keras.Input(shape=(3,))
hidden = tf.keras.layers.Dense(2, activation='relu')(inputs)
preds = tf.keras.layers.Dense(1)(hidden)
model = tf.keras.Model(inputs=inputs, outputs=preds)

# Run one forward pass per batch and evaluate the loss on its output.
for batch_x, batch_y in train_dataset:
    ypred = model(batch_x)
    print(type(ypred))
    loss_object(batch_y, ypred)
它运行正常
model(x)
的类型为 Tensor,
但是在类代码中,
self.build_model(X)
的类型是 Model
在方法中将第一行从 ypred=self.build_model(X)
更改为 ypred=self.build_model()(X)
另一种与数据设置“兼容”的方法:
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Toy dataset: three feature columns (A, B, C) and a numeric Target.
df = pd.DataFrame({
    'A': [100, 105.4, 108.3, 111.1, 113, 114.7],
    'B': [11, 11.8, 12.3, 12.8, 13.1, 13.6],
    'C': [55, 56.3, 57, 58, 59.5, 60.4],
    'Target': [4000, 4200.34, 4700, 5300, 5800, 6400],
})
def data():
    """Split the module-level df into train/test arrays (80/20, seeded).

    Returns:
        (X_train, X_test, y_train, y_test) numpy arrays.
    """
    feature_matrix = df.iloc[:, :3].values
    target_vector = df.iloc[:, 3].values
    x_tr, x_te, y_tr, y_te = train_test_split(
        feature_matrix, target_vector, test_size=0.2, random_state=134)
    return x_tr, x_te, y_tr, y_te
# Materialize the split and expose each training feature column by name.
X_train, X_test, y_train, y_test = data()
features = {name: X_train[:, idx] for idx, name in enumerate(('A', 'B', 'C'))}
labels = y_train
batch_size = 1
def train_input_fn(features, labels, batch_size):
    """Build a shuffled, endlessly repeating, batched tf.data pipeline.

    Args:
        features: dict of column-name -> 1-D array of feature values.
        labels: 1-D array of targets aligned with the feature columns.
        batch_size: number of examples per emitted batch.

    Returns:
        A tf.data.Dataset yielding (feature_dict, labels) batches.
    """
    pipeline = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return pipeline.shuffle(1000).repeat().batch(batch_size)
def pack_features_vector(features, labels):
    """Collapse a dict of feature columns into one stacked tensor.

    Returns (stacked_features, labels); labels pass through untouched.
    """
    stacked = tf.stack(list(features.values()), axis=1)
    return stacked, labels
# Build the streaming training pipeline, then collapse each batch's
# feature dict into a single stacked feature tensor.
train_dataset = train_input_fn(features, labels, batch_size).map(pack_features_vector)
class Model(tf.keras.Model):
    """Two-layer MLP regressor: Dense(2, relu) -> Dense(1)."""

    def __init__(self):
        super(Model, self).__init__()
        self.l1 = tf.keras.layers.Dense(2, activation='relu')
        self.out = tf.keras.layers.Dense(1)

    # Fix: override `call`, NOT `__call__`. Subclassed Keras models must
    # implement `call`; the base class's `__call__` wraps it with build,
    # input handling and tracking logic. Overriding `__call__` directly
    # bypasses that machinery (this is the failure mode discussed in the
    # surrounding Q&A). `model(x)` still works unchanged for callers.
    def call(self, x):
        h = self.l1(x)
        return self.out(h)
# Optimizer, loss, model and metrics for the custom training loop.
learning_rate = 1
optimizer = tf.optimizers.RMSprop(learning_rate)
loss_object = tf.keras.losses.mean_squared_error
model = Model()
train_loss = tf.keras.metrics.Mean(name='train_loss')
# NOTE(review): SparseCategoricalAccuracy on a single-unit regression
# output looks meaningless — confirm the intended metric.
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

# NOTE(review): the dataset uses .repeat() with no count, so this loop
# never terminates on its own.
for x, y in train_dataset:
    with tf.GradientTape() as tape:
        y_ = model(x)
        loss = loss_object(y, y_)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print("loss", train_loss(loss), "accuracy", train_accuracy(y, y_))
如果您将 tf.keras.Model 子类化,它是否有效?@DecentGradient:如果你的意思是
class Model(tf.keras.Model):
,不会出现同样的错误。谢谢你的帮助,你的代码很好,但我真的想知道我的代码有什么问题。在你的 loss 方法中,你能将第一行从 ypred=self.build_model(X) 改为
ypred=self.build_model()(X) 吗?是的,这就是问题所在..我之前发现了..如果您愿意,请在回答中提及这一点以便接受。谢谢
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Toy dataset: three feature columns (A, B, C) and a numeric Target.
df = pd.DataFrame({
    'A': [100, 105.4, 108.3, 111.1, 113, 114.7],
    'B': [11, 11.8, 12.3, 12.8, 13.1, 13.6],
    'C': [55, 56.3, 57, 58, 59.5, 60.4],
    'Target': [4000, 4200.34, 4700, 5300, 5800, 6400],
})
def data():
    """Split the module-level df into train/test arrays (80/20, seeded).

    Returns:
        (X_train, X_test, y_train, y_test) numpy arrays.
    """
    feature_matrix = df.iloc[:, :3].values
    target_vector = df.iloc[:, 3].values
    x_tr, x_te, y_tr, y_te = train_test_split(
        feature_matrix, target_vector, test_size=0.2, random_state=134)
    return x_tr, x_te, y_tr, y_te
# Materialize the split and expose each training feature column by name.
X_train, X_test, y_train, y_test = data()
features = {name: X_train[:, idx] for idx, name in enumerate(('A', 'B', 'C'))}
labels = y_train
batch_size = 1
def train_input_fn(features, labels, batch_size):
    """Build a shuffled, endlessly repeating, batched tf.data pipeline.

    Args:
        features: dict of column-name -> 1-D array of feature values.
        labels: 1-D array of targets aligned with the feature columns.
        batch_size: number of examples per emitted batch.

    Returns:
        A tf.data.Dataset yielding (feature_dict, labels) batches.
    """
    pipeline = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return pipeline.shuffle(1000).repeat().batch(batch_size)
def pack_features_vector(features, labels):
    """Collapse a dict of feature columns into one stacked tensor.

    Returns (stacked_features, labels); labels pass through untouched.
    """
    stacked = tf.stack(list(features.values()), axis=1)
    return stacked, labels
# Build the streaming training pipeline, then collapse each batch's
# feature dict into a single stacked feature tensor.
train_dataset = train_input_fn(features, labels, batch_size).map(pack_features_vector)
class Model(tf.keras.Model):
    """Two-layer MLP regressor: Dense(2, relu) -> Dense(1)."""

    def __init__(self):
        super(Model, self).__init__()
        self.l1 = tf.keras.layers.Dense(2, activation='relu')
        self.out = tf.keras.layers.Dense(1)

    # Fix: override `call`, NOT `__call__`. Subclassed Keras models must
    # implement `call`; the base class's `__call__` wraps it with build,
    # input handling and tracking logic. Overriding `__call__` directly
    # bypasses that machinery (this is the failure mode discussed in the
    # surrounding Q&A). `model(x)` still works unchanged for callers.
    def call(self, x):
        h = self.l1(x)
        return self.out(h)
# Optimizer, loss, model and metrics for the custom training loop.
learning_rate = 1
optimizer = tf.optimizers.RMSprop(learning_rate)
loss_object = tf.keras.losses.mean_squared_error
model = Model()
train_loss = tf.keras.metrics.Mean(name='train_loss')
# NOTE(review): SparseCategoricalAccuracy on a single-unit regression
# output looks meaningless — confirm the intended metric.
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

# NOTE(review): the dataset uses .repeat() with no count, so this loop
# never terminates on its own.
for x, y in train_dataset:
    with tf.GradientTape() as tape:
        y_ = model(x)
        loss = loss_object(y, y_)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print("loss", train_loss(loss), "accuracy", train_accuracy(y, y_))