Machine learning: Building a non-linear model with ReLUs in TensorFlow

I am trying to build a simple non-linear model in TensorFlow. I created the following sample data:

import numpy as np

x_data = np.arange(-100, 100).astype(np.float32)
y_data = np.abs(x_data + 20.)

I imagine this shape should be easy to reconstruct with a couple of ReLUs, but I can't figure out how to do it.

My approach so far has been to wrap the linear components in ReLUs, but this doesn't run:

import tensorflow as tf

W1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b1 = tf.Variable(tf.zeros([1]))
b2 = tf.Variable(tf.zeros([1]))

y = tf.nn.relu(W1 * x_data + b1) + tf.nn.relu(W2 * x_data + b2)

Any ideas on how to express this model with ReLUs in TensorFlow?

Here is a simple feed-forward network with one hidden layer:

import numpy as np
import tensorflow as tf

episodes = 55
batch_size = 5
hidden_units = 10
learning_rate = 1e-3

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# generate data in the normalized range [0, 1) and shuffle it
x_data = np.arange(0, 1, 0.005).astype(float)
np.random.shuffle(x_data)
y_data = np.abs(x_data + .1)

# reshape data ...
x_data = x_data.reshape(200, 1)
y_data = y_data.reshape(200, 1)

# create placeholders to pass the data to the model
x = tf.placeholder('float', shape=[None, 1])
y_ = tf.placeholder('float', shape=[None, 1])

W1 = weight_variable([1, hidden_units])
b1 = bias_variable([hidden_units])
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)

W2 = weight_variable([hidden_units, 1])
b2 = bias_variable([1])
y = tf.matmul(h1, W2) + b2

# note: despite the name, this is a sum of squared errors, not a mean
mean_square_error = tf.reduce_sum(tf.square(y - y_))
training = tf.train.AdamOptimizer(learning_rate).minimize(mean_square_error)

sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())

for _ in range(episodes):
    # slide a window of batch_size rows over the data
    for i in range(x_data.shape[0] - batch_size + 1):
        _, error = sess.run([training, mean_square_error], feed_dict={x: x_data[i:i+batch_size], y_: y_data[i:i+batch_size]})
        print(error, x_data[i:i+batch_size], y_data[i:i+batch_size])


# final evaluation on the last batch
error = sess.run(mean_square_error, feed_dict={x: x_data[i:i+batch_size], y_: y_data[i:i+batch_size]})
print(error)
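
Since x_data was shuffled above, sorting the inputs first makes the learned function easier to inspect. A minimal follow-up sketch, reusing x, y and sess from the snippet above:

# Inspect the fit: predictions vs. sorted inputs.
x_sorted = np.sort(x_data, axis=0)
y_pred = sess.run(y, feed_dict={x: x_sorted})
for xi, yi in zip(x_sorted[:5], y_pred[:5]):
    print(xi[0], yi[0])
sess.close()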

I think you're asking how to combine ReLUs into a working model? Two options are shown below:

Option 1) Feed ReLU1 into ReLU2

This is probably the preferred method. Note that r1 is the input to r2.
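
For concreteness, here is a sketch of that wiring; the same lines appear in the full working example further down:

W1 = weight_variable([1, hidden_units])
b1 = bias_variable([hidden_units])
r1 = tf.nn.relu(tf.matmul(x, W1) + b1)

# Feed r1 into the second ReLU, whose output is y
W2 = weight_variable([hidden_units, 1])
b2 = bias_variable([1])
y = tf.nn.relu(tf.matmul(r1, W2) + b2)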

Option 2) Add ReLU1 and ReLU2

Option 2 is what the original question listed, but I don't know if that's what you really want... read the complete working example below and try it. I think you'll find it doesn't model the data well.

x = tf.placeholder('float', shape=[None, 1])
y_ = tf.placeholder('float', shape=[None, 1])

W1 = weight_variable([1, hidden_units])
b1 = bias_variable([hidden_units])
r1 = tf.nn.relu(tf.matmul(x, W1) + b1)

# Add r1 to r2 -- won't be able to reduce the error.
W2 = weight_variable([1, hidden_units])
b2 = bias_variable([hidden_units])
r2 = tf.nn.relu(tf.matmul(x, W2) + b2)
y = tf.add(r1, r2)  # here y is the sum of the two ReLU outputs
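
Incidentally, you can see one problem with Option 2 without training at all: with W2 of shape [1, hidden_units], y comes out with one column per hidden unit instead of a single output column, so the loss only works by broadcasting y_ across those columns. A quick check of the static shapes (assuming the placeholders defined above, with hidden_units = 10):

print(r1.get_shape())  # (?, 10): one column per hidden unit
print(y.get_shape())   # (?, 10): still not a single output column
print(y_.get_shape())  # (?, 1):  the loss broadcasts y_ across columns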
Complete working example

Below is a complete working example. By default it uses Option 1; Option 2 is included in the comments.

 from __future__ import print_function
 import tensorflow as tf
 import numpy as np
 import matplotlib.pyplot as plt

 # Configure the matplotlib backend for inline plotting in IPython
 %matplotlib inline


 episodes = 55
 batch_size = 5
 hidden_units = 10
 learning_rate = 1e-3

 def weight_variable(shape):
     initial = tf.truncated_normal(shape, stddev=0.1)
     return tf.Variable(initial)

 def bias_variable(shape):
     initial = tf.constant(0.1, shape=shape)
     return tf.Variable(initial)


 # Produce the data
 x_data = np.arange(-100, 100).astype(np.float32)
 y_data = np.abs(x_data + 20.)

 # Plot it.
 plt.plot(y_data)
 plt.ylabel('y_data')
 plt.show()

 # Might want to randomize the data
 # np.random.shuffle(x_data)
 # y_data = np.abs(x_data + 20.)

 # reshape data ...
 x_data = x_data.reshape(200, 1)
 y_data = y_data.reshape(200, 1)

 # create placeholders to pass the data to the model
 x = tf.placeholder('float', shape=[None, 1])
 y_ = tf.placeholder('float', shape=[None, 1])

 W1 = weight_variable([1, hidden_units])
 b1 = bias_variable([hidden_units])
 r1 = tf.nn.relu(tf.matmul(x, W1) + b1)

 # Input of r1 into r2 (which is just y)
 W2 = weight_variable([hidden_units, 1])
 b2 = bias_variable([1])
 y = tf.nn.relu(tf.matmul(r1,W2)+b2) 

 # OPTION 2 
 # Add r1 to r2 -- won't be able to reduce the error.
 #W2 = weight_variable([1, hidden_units])
 #b2 = bias_variable([hidden_units])
 #r2 = tf.nn.relu(tf.matmul(x, W2) + b2)
 #y = tf.add(r1,r2)


 mean_square_error = tf.reduce_sum(tf.square(y-y_))
 training = tf.train.AdamOptimizer(learning_rate).minimize(mean_square_error)

 sess = tf.InteractiveSession()
 sess.run(tf.initialize_all_variables())

 min_error = np.inf
 for _ in range(episodes):
     # slide a window of batch_size rows over the data
     for i in range(x_data.shape[0]-batch_size+1):
         _, error = sess.run([training, mean_square_error],  feed_dict={x: x_data[i:i+batch_size], y_:y_data[i:i+batch_size]})
         if error < min_error :
             min_error = error
             if min_error < 3:
                 print(error)
         #print(error)
         #print(error, x_data[i:i+batch_size], y_data[i:i+batch_size])


 # error = sess.run([training, mean_square_error],  feed_dict={x: x_data[i:i+batch_size], y_:y_data[i:i+batch_size]})
 # if error != None:
 #    print(error)


 sess.close()

 print("\n\nmin_error:",min_error)

This is probably easier to view in a Jupyter notebook. Inspired by all the answers, I trained this model using the approach proposed in the accepted answer. The code is below:

import tensorflow as tf
import numpy as np

# Create 200 x, y data points in NumPy to represent the function
x_data = np.arange(-100, 100).astype(np.float32)
y_data = np.abs(x_data + 20.) 

W1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b1 = tf.Variable(tf.zeros([1]))
W2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b2 = tf.Variable(tf.zeros([1]))
y = tf.nn.relu(W1 * x_data + b1) + tf.nn.relu(W2 * x_data + b2)

# Minimize the sum of squared errors.
learning_rate = 1e-3  # not defined in the original snippet; any small Adam step size works
mean_square_error = tf.reduce_sum(tf.square(y - y_data))
train = tf.train.AdamOptimizer(learning_rate).minimize(mean_square_error)

sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
# Fit the non-linear function.
for step in range(50000):
    sess.run(train)
    if step % 10000 == 0:
        # Expected values: W1 = 1., W2 = -1., b1 = 20., b2 = -20.
        print(step, sess.run(W1), sess.run(b1), sess.run(W2), sess.run(b2))
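
For reference, those expected values come from the identity |x + 20| = ReLU(x + 20) + ReLU(-x - 20): for any x at most one of the two terms is non-zero, and their sum is exactly the absolute value. A quick NumPy check against the data defined above:

# Verify: relu(1*x + 20) + relu(-1*x - 20) reproduces y_data exactly.
relu = lambda v: np.maximum(v, 0.)
reconstructed = relu(1. * x_data + 20.) + relu(-1. * x_data - 20.)
print(np.allclose(reconstructed, y_data))  # True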

Hmm, I don't think this answers my question. What I was asking is how to model that specific function with ReLUs.

With W1, W2, b1, b2 = 1., -1., 20., -20. it should reproduce your data exactly (not sure if that was your question). What do you mean by "this doesn't run"?

Yes, you're right. My problem was that I was using the wrong model: GradientDescentOptimizer(0.5) returned "InvalidArgumentError: ReluGrad input is not finite: Tensor had Inf and NaN values". With the proposed model, AdamOptimizer(learning_rate), I was able to train both ReLUs to the expected values. Thanks for your help. I think TensorBoard would also be helpful when building models; however, the documentation is a bit hard to follow. I kept getting errors until I added tf.reset_default_graph(), which isn't mentioned in the docs. Ref:
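
For anyone hitting the same notebook problem: every re-run of a cell that builds ops adds duplicates to the same default graph, which is a common source of such errors. Clearing the graph before rebuilding fixes it; note that the TensorBoard writer API also differs across these old releases (tf.train.SummaryWriter before 1.0, tf.summary.FileWriter from 1.0 on). A minimal sketch of the reset pattern:

import tensorflow as tf

# Drop everything earlier runs added to the default graph;
# without this, re-running the cell piles up duplicate ops.
tf.reset_default_graph()

# Rebuild the model on the now-empty graph.
W1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b1 = tf.Variable(tf.zeros([1]))

sess = tf.Session()
sess.run(tf.initialize_all_variables())
print(sess.run(W1), sess.run(b1))
sess.close()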