Python 在TensorFlow中,如何向session.run()提供2个参数

Python 在TensorFlow中,如何向session.run()提供2个参数,python,machine-learning,tensorflow,Python,Machine Learning,Tensorflow,我正在尝试进入TensorFlow,并尝试对初学者的示例进行一些更改 我正试图与你结合 我使用X,y=sklearn.datasets.make_moons(50,noise=0.20)获取数据。基本上,这条线给出了2D X(,)和2类Y(0/1) 网络的结构与专家的深度MNIST相同。不同之处在于会话运行功能 sess.run(train_step, feed_dict={x:X, y_:y}) 但这给了我们一个机会 _ValueError: setting an array element

我正在尝试进入TensorFlow,并尝试对初学者的示例进行一些更改

我正试图把它和另一个教程结合起来（原文此处是一个超链接，抓取时丢失了）

我使用
X,y=sklearn.datasets.make_moons(50,noise=0.20)
获取数据。基本上，这一行给出二维特征 X（形状为 (50, 2)）和两类标签 y（取值 0/1）

网络的结构与专家的深度MNIST相同。不同之处在于会话运行功能

sess.run(train_step, feed_dict={x:X, y_:y})
但这给了我一个错误：

_ValueError: setting an array element with a sequence._
有人能给我一些关于这个问题的提示吗?这是代码

# --- Asker's original script, kept verbatim (TF 1.x API). ---
# NOTE(review): this code is intentionally buggy — it reproduces
#   ValueError: setting an array element with a sequence.
# The root cause is annotated inline below.
import numpy as np
import matplotlib
import tensorflow as tf
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
sess = tf.InteractiveSession()  # TF 1.x session API (removed in TF 2.x)
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
np.random.seed(0)
# X: (50, 2) float features; y: (50,) integer labels in {0, 1}
X, y = sklearn.datasets.make_moons(50, noise=0.20)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
batch_xs = np.vstack([np.expand_dims(k,0) for k in X])  # NOTE(review): unused below
x = tf.placeholder(tf.float32, shape=[50,2])
# y_ expects shape (50, 2), but the labels `y` are shape (50,) — a second,
# separate problem the answer later fixes with one-hot encoding.
y_ = tf.placeholder(tf.float32, shape=[50,2])
W = tf.Variable(tf.zeros([2,2]))
b = tf.Variable(tf.zeros([2]))
a = np.arange(100).reshape((50, 2))  # NOTE(review): unused
# BUG: this rebinds the Python name `y` — previously the NumPy label array —
# to a TensorFlow tensor.
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
sess.run(tf.initialize_all_variables())
for i in range(20000):
# Because of the rebinding above, `y_:y` tries to feed a placeholder with
# another *tensor* instead of array data -> the ValueError.
# NOTE(review): the loop body below lost its indentation when the page was
# scraped; in the original it is indented under the `for`.
sess.run(train_step, feed_dict={x:X, y_:y})

以下是与TensorFlow抗争后的正确代码:

# --- Accepted answer: corrected script (TF 1.x API, Python 2 `print` syntax). ---
# Fixes vs. the question code: (1) the softmax output no longer shadows the
# label array, and (2) the integer labels are expanded to 2-D one-hot rows so
# they match the placeholder's shape.
# Package imports
import numpy as np
import matplotlib
import tensorflow as tf
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model

rng = np.random  # NOTE(review): unused alias

# Network dimensions: 2 input features -> 3 hidden units -> 2 output classes.
input_dim = 2
output_dim = 2
hidden_dim = 3

np.random.seed(0)
Train_X, Train_Y = sklearn.datasets.make_moons(200, noise=0.20)
Train_X = np.reshape(Train_X, (-1,2))
Train_YY = []  
for i in Train_Y:       #making Train_Y a 2-D list
    if i == 1:
        # NOTE(review): one-hot encoding is reversed (class 1 -> index 0);
        # harmless here because it is used consistently for both training
        # and accuracy, but conventional code would use [0,1] for class 1.
        Train_YY.append([1,0])
    else:
        Train_YY.append([0,1])
print Train_YY  # Python 2 print statement; use print(...) on Python 3
X = tf.placeholder("float",shape=[None,input_dim])
Y = tf.placeholder("float")
W1 = tf.Variable(tf.random_normal([input_dim, hidden_dim], stddev=0.35),
                      name="weights")
b1 = tf.Variable(tf.zeros([1,hidden_dim]), name="bias1")
a1 = tf.tanh(tf.add(tf.matmul(X,W1),b1))  # hidden layer with tanh activation
W2 = tf.Variable(tf.random_normal([hidden_dim,output_dim]), name="weight2")
b2 = tf.Variable(tf.zeros([1,output_dim]), name="bias2")
a2 = tf.add(tf.matmul(a1, W2), b2)  # output logits
# Key fix: the softmax result gets its own name (`output`), so the Python
# name for the labels is never clobbered.
output=tf.nn.softmax(a2)
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cross_entropy = -tf.reduce_sum(Y*tf.log(output))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # deprecated: tf.global_variables_initializer() in later TF 1.x
    for i in range(20000):
        # for (a,d) in zip(Train_X, Train_Y):
        # NOTE(review): `optimizer` returns None from sess.run, so
        # `training_cost` is always None; the cost itself is not fetched.
        training_cost = sess.run(optimizer, feed_dict={X:Train_X, Y:Train_YY})
        if i%1000 == 0:
            # print "Training cost=", training_cost, "W1=", W1.eval(), "b1=", b1.eval(),"W2=", W2.eval(), "b2=", b2.eval()
            # print output.eval({X:Train_X, Y:Train_YY})
            # print cross_entropy.eval({X:Train_X, Y:Train_YY})
            print "Accuracy = ", accuracy.eval({X:Train_X, Y:Train_YY}) 

出现问题的原因是您在以下行中重新定义了
y

y = tf.nn.softmax(tf.matmul(x,W) + b)
TensorFlow 随后报错，因为 feed_dict 中的
`y_: y`
试图用另一个张量去馈送一个占位符，而这是不允许的（即使允许，这个馈送也会造成循环依赖！）

解决方案是给 softmax 的输出换一个名字，并相应改写交叉熵：

# Fix: bind the softmax output to a fresh name so the label array `y`
# is no longer shadowed by a tensor.
y_softmax = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_softmax))

哦，多么愚蠢的错误。但接着又出现了另一个错误：`ValueError: Cannot feed value of shape (50,) for Tensor u'Placeholder:0', which has shape (Dimension(50), Dimension(2))`。通过查阅其他多个回答，把标签转成二维的 one-hot 形式后修复了此问题
# (Same fix repeated: rename the softmax output so it does not shadow `y`.)
y_softmax = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_softmax))