Machine learning: NaN results from TensorFlow multivariate linear regression

Tags: machine-learning, scikit-learn, tensorflow, linear-regression

I am doing multivariate linear regression with sklearn's Boston housing dataset (a 506x13 matrix). My plan is to train on all of the data, then "plug in" an arbitrary row such as boston_dataset.data[39] and look at the loss. But when I print the results I get NaN. Here is my code:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston

np.set_printoptions(suppress=True)

boston = load_boston()

m = boston.data.shape[0] - 1

bt_unfixed = np.transpose(boston.data)
bt = np.insert(bt_unfixed, 0, 1)

Y = tf.placeholder(tf.float64, name='Y___')
X = tf.placeholder(tf.float64, [1, 13], name='X_____')
#print X.shape
W = tf.Variable(tf.zeros([13, 1]), name='weights')
b = tf.Variable(0.5, name='bias')

hypothesis = tf.add(tf.matmul(X, tf.cast(W, tf.float64)), tf.cast(b, tf.float64))

loss = tf.reduce_sum(tf.square(hypothesis - Y)) / (2 * m)

optimizer = tf.train.GradientDescentOptimizer(0.01)

train_op = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for i in range(0, 500):
        for (x, y) in zip(boston.data, boston.target):
            sess.run(train_op, feed_dict={X:x.reshape(1, 13), Y:y})
        if (i + 1)%50 == 0:
            print "Ran " + str(i) + "times\nW=" +str(sess.run(W)) + "\nb=" +str(sess.run(b))

    print "Done!\n"
    print "Running test...\n"
    # evaluate the loss and prediction on one example (the feed must be reshaped to [1, 13])
    t = sess.run(loss, feed_dict={X:boston.data[504].reshape(1, 13), Y:boston.target[504]})
    print "loss = " + str(t) + " Real value " + str(boston.target[504]) + " Pred " + str(sess.run(hypothesis, feed_dict={X:boston.data[504].reshape(1, 13)}))

Thanks, everyone! Also, please feel free to add any suggestions.

It seems you are not doing any preprocessing of the Boston data, which makes the loss and hypothesis values blow up to inf (and then NaN). So I normalized the data, and it works. Here is my code:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston

boston = load_boston()
data = boston.data
label = boston.target

# standardize the data: zero mean and unit variance per feature
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)

M = boston.data.shape[0]

Y = tf.placeholder(tf.float32, name='Y')
X = tf.placeholder(tf.float32, [1, 13], name='X')
W = tf.Variable(tf.random_normal([13, 1]), name='weights')
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.add(tf.matmul(X, W), b)

loss = tf.reduce_sum(tf.square(hypothesis - Y)) / (2. * (M - 1))

optimizer = tf.train.GradientDescentOptimizer(0.01)

train_op = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for i in range(0, 500):
        for l in xrange(M):
            _, loss_value, hypo = sess.run(
                [train_op, loss, hypothesis],
                feed_dict={X: data[l, :].reshape([1, 13]),
                           Y: label[l]})
        if (i + 1) % 50 == 0:
            print "Ran " + str(i) + " times\nW=" + \
                str(sess.run(W)) + "\nb=" + str(sess.run(b))

    print "Done!\n"
    print "Running test...\n"
    t = sess.run(
        loss, feed_dict={X: data[50].reshape([1, 13]),
                         Y: label[50]})
    print "loss = " + str(t)
    print "Real value Y: " + str(label[50])
    print "Pred Y: " + str(sess.run(hypothesis,
                                    feed_dict={X: data[50].reshape([1, 13])}))
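
As a side note, here is a minimal sketch of my own (not part of the answer above): it does the same standardization with sklearn's StandardScaler instead of np.mean/np.std, and checks against sklearn's closed-form LinearRegression. With standardized features, the W and b learned by gradient descent above should come out close to these values.

# Minimal sketch (assumption: sklearn's StandardScaler and LinearRegression as a baseline;
# these are not used in the answer above).
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression

boston = load_boston()
scaler = StandardScaler()
data = scaler.fit_transform(boston.data)      # zero mean, unit variance per column

reg = LinearRegression()
reg.fit(data, boston.target)                  # closed-form least squares fit

print("OLS weights: " + str(reg.coef_))       # should be close to the trained W
print("OLS bias: " + str(reg.intercept_))     # should be close to the trained b
print("Pred for row 50: " + str(reg.predict(data[50].reshape(1, -1))[0]))
print("Real value for row 50: " + str(boston.target[50]))

Comparing the gradient-descent result against this baseline is a quick way to confirm that any remaining gap comes from the optimizer settings, not from the data.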