
Python: how can I reduce the variance of the model loss during training?


I know that stochastic gradient descent always gives different results. What is the current best practice for reducing this variance? I tried to fit a simple function with two different approaches, and every time I train them I see very different results.

Input data:

import tensorflow as tf
import matplotlib.pyplot as plt

def plot(model_out):
  fig, ax = plt.subplots()
  ax.grid(True, which='both')
  ax.axhline(y=0, color='k', linewidth=1)
  ax.axvline(x=0, color='k', linewidth=1)

  ax.plot(x_line, y_line, c='g', linewidth=1)   # true function
  ax.scatter(inputs, targets, c='b', s=8)       # noisy training targets
  ax.scatter(inputs, model_out, c='r', s=8)     # model predictions

# Ground truth: y = a*sin(x) + b, with unit Gaussian noise added to the targets
a = 5.0; b = 3.0; x_left, x_right = -16., 16.
NUM_EXAMPLES = 200
noise   = tf.random.normal((NUM_EXAMPLES,1))

inputs  = tf.random.uniform((NUM_EXAMPLES,1), x_left, x_right)
targets = a * tf.sin(inputs) + b + noise
x_line  = tf.linspace(x_left, x_right, 500)
y_line  = a * tf.sin(x_line) + b
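
Note that one immediate source of run-to-run variance is that this dataset is re-sampled on every run. A minimal sketch, assuming you are happy to fix the global TensorFlow seed, that makes the synthetic data (and, with it, the default weight initialization) repeatable:

# Fixing the global seed makes tf.random.normal / tf.random.uniform above
# repeatable, so any remaining variance comes from the optimization itself
# rather than from a freshly sampled dataset. The seed value 42 is arbitrary.
tf.random.set_seed(42)

noise   = tf.random.normal((NUM_EXAMPLES, 1))
inputs  = tf.random.uniform((NUM_EXAMPLES, 1), x_left, x_right)
targets = a * tf.sin(inputs) + b + noise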
Keras training:

# Built-in Keras fit() loop: one full-batch gradient step per epoch (batch_size == NUM_EXAMPLES)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(50, activation='relu', input_shape=(1,)))
model.add(tf.keras.layers.Dense(50, activation='relu'))
model.add(tf.keras.layers.Dense(1))

model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.01))
model.fit(inputs, targets, batch_size=200, epochs=2000, verbose=0)

print(model.evaluate(inputs, targets, verbose=0))   # final MSE on the training data
plot(model.predict(inputs))
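
fit() returns a History object; capturing it (instead of discarding it as above) lets you plot the per-epoch loss, which is also what the commenters at the bottom ask for. A small sketch, using matplotlib directly:

import matplotlib.pyplot as plt

history = model.fit(inputs, targets, batch_size=200, epochs=2000, verbose=0)

# history.history['loss'] holds one MSE value per epoch
fig, ax = plt.subplots()
ax.plot(history.history['loss'], linewidth=1)
ax.set_yscale('log')                 # log scale makes late-training fluctuations easier to see
ax.set_xlabel('epoch')
ax.set_ylabel('training loss (MSE)')
plt.show()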

Manual training:

# Handwritten training loop over the same architecture
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(50, activation='relu', input_shape=(1,)))
model.add(tf.keras.layers.Dense(50, activation='relu'))
model.add(tf.keras.layers.Dense(1))

optimizer = tf.keras.optimizers.Adam(0.01)

@tf.function
def train_step(inpt, targ):
  with tf.GradientTape() as g:
    model_out = model(inpt)
    model_loss = tf.reduce_mean(tf.square(tf.math.subtract(targ, model_out)))  # MSE, as in the Keras version

  gradients = g.gradient(model_loss, model.trainable_variables)
  optimizer.apply_gradients(zip(gradients, model.trainable_variables))
  return model_loss

# 2000 passes over the 200-example dataset, one full batch per step
train_ds = tf.data.Dataset.from_tensor_slices((inputs, targets))
train_ds = train_ds.repeat(2000).batch(200)

def train(train_ds):
  for inpt, targ in train_ds:
    model_loss = train_step(inpt, targ)
  tf.print(model_loss)   # prints only the loss from the final step

train(train_ds)
plot(tf.squeeze(model(inputs)))
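
A common way to damp the step-to-step fluctuation of the loss late in training, for either loop, is to decay the learning rate instead of keeping Adam fixed at 0.01. A minimal sketch using the built-in exponential-decay schedule; the decay numbers below are only illustrative:

# Decays the learning rate from 0.01 by a factor of 0.5 every 500 steps
# (about 0.0006 after the 2000 training steps), which usually reduces the
# oscillation of the loss near convergence.
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.01,
    decay_steps=500,
    decay_rate=0.5)

optimizer = tf.keras.optimizers.Adam(lr_schedule)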

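Since the question is ultimately about variance across runs (which the comments below also point at), a common practice is to fix every relevant random seed and, to quantify whatever variance remains, repeat training several times and report the mean and standard deviation of the final loss. A minimal sketch, assuming a hypothetical build_and_train() helper that wraps either of the training procedures above and returns the final MSE:

import random
import numpy as np
import tensorflow as tf

def run_once(seed):
  # Fix the Python, NumPy and TensorFlow seeds so a single run is reproducible.
  random.seed(seed)
  np.random.seed(seed)
  tf.random.set_seed(seed)
  return build_and_train()  # hypothetical helper: builds the model, trains it, returns the final MSE

final_losses = [run_once(s) for s in range(5)]
print('final loss: %.4f +/- %.4f' % (np.mean(final_losses), np.std(final_losses)))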
Try posting your question on AI Stack Exchange.
@Aragon S: OK, thanks. Increasing the batch size to directly reduce the variance is not really an option here, since the goal is to work with a small dataset: I have 200 examples in the dataset and batch_size=200.
Without further clarification, the variance of the loss comes down to reproducibility; otherwise you need to define what the loss varies with respect to (hyperparameters, repeated runs, etc.). Also, show some plots of the loss.