Tensorflow 测试数据精度评定中的张量流误差

Tensorflow 测试数据精度评定中的张量流误差,tensorflow,Tensorflow,我使用了一个包含272个特性的数据集,其中包含1770个示例和3个类,我能够训练数据,但是在使用测试数据的准确性时,我遇到了一个错误。我的代码哪里出错了 InvalidArgumentError (see above for traceback): Incompatible shapes: [770] vs. [1000][[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/cpu:0"](ArgM

我使用了一个包含272个特性的数据集,其中包含1770个示例和3个类,我能够训练数据,但是在使用测试数据的准确性时,我遇到了一个错误。我的代码哪里出错了

InvalidArgumentError (see above for traceback): Incompatible shapes: [770] 
vs. [1000][[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/cpu:0"](ArgMax, ArgMax_1)]]
这是我的代码:

import tensorflow as tf
import pandas as pd
import numpy as np
from random import shuffle

# Load "35.csv" (no header row): per the question, 1770 examples with 272
# float features and an integer class label (3 classes).
# NOTE(review): tf.contrib was removed in TensorFlow 2.x — this targets TF 1.x.
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
  filename="35.csv",
  target_dtype=np.int32,
  features_dtype=np.float32)


# Hyper-parameters: three hidden layers of 500 units each, 3 output
# classes, mini-batches of 100 examples.
n_nodes_hl1=500
n_nodes_hl2=500
n_nodes_hl3=500
n_classes=3
batch_size=100


data=np.array(training_set.data)

# Train/test split: first 1000 rows train, remaining 770 rows test.
training_data=data[:1000]
#print(np.shape(training_data))
test_data=data[1000:]
#print(np.shape(test_data))
target=np.array(training_set.target)

# Same split applied to the integer labels.
training_target=target[:1000]
#print(np.shape(training_target))

test_target=target[1000:]
#print(np.shape(test_target))

def _one_hot(labels, n_classes=3):
    """One-hot encode integer class labels.

    Args:
        labels: iterable of int class ids in ``range(n_classes)``.
        n_classes: width of each one-hot row (default 3, matching the
            network's output layer).

    Returns:
        List of ``n_classes``-length 0/1 lists, one per input label.
    """
    encoded = []
    for label in labels:
        row = [0] * n_classes
        row[label] = 1
        encoded.append(row)
    return encoded

training_target = _one_hot(training_target)
# BUG FIX: the original code assigned `test_target = y_labels` — the 1000
# encoded TRAINING labels — instead of the encoded TEST labels (770 rows).
# That mismatch is exactly the reported error:
#   Incompatible shapes: [770] vs. [1000]
# raised by tf.equal(argmax(prediction), argmax(y)) at evaluation time.
test_target = _one_hot(test_target)

# x: feature input, shape (batch, 272); y: one-hot target labels.
x=tf.placeholder('float',[None,272])
y=tf.placeholder('float')

def neural_network_model(data):
    """Build a 3-hidden-layer fully-connected network.

    Args:
        data: float tensor of shape ``(batch, 272)`` with input features.

    Returns:
        ``(batch, n_classes)`` tensor of raw (unscaled) logits; softmax is
        applied by the loss function in ``train_neural_network``.
    """
    # NOTE(review): the original paste was a SyntaxError — the layer-dict
    # assignments were split across lines without continuation and the layer
    # math sat at module indentation, outside the function.  Restored here.
    hidden_1_layer = {
        'weights': tf.Variable(tf.random_normal([272, n_nodes_hl1])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))}

    # Three ReLU hidden layers: x @ W + b, then ReLU.
    l1 = tf.nn.relu(tf.add(tf.matmul(data, hidden_1_layer['weights']),
                           hidden_1_layer['biases']))
    l2 = tf.nn.relu(tf.add(tf.matmul(l1, hidden_2_layer['weights']),
                           hidden_2_layer['biases']))
    l3 = tf.nn.relu(tf.add(tf.matmul(l2, hidden_3_layer['weights']),
                           hidden_3_layer['biases']))

    # Output layer has no activation: return logits for
    # softmax_cross_entropy_with_logits.
    return tf.matmul(l3, output_layer['weights']) + output_layer['biases']

def train_neural_network(x):
    """Train the network for 20 epochs and print test-set accuracy.

    Args:
        x: the (batch, 272) input placeholder; labels are fed through the
           module-level placeholder ``y``.

    Side effects:
        Prints per-epoch loss and final accuracy; reads the module-level
        ``training_data``, ``training_target``, ``test_data``,
        ``test_target`` and ``batch_size``.
    """
    prediction = neural_network_model(x)

    # softmax_cross_entropy_with_logits expects raw logits + one-hot labels.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 20

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            # BUG FIX: the original condition was
            #     while i < len(training_data[0])
            # i.e. the FEATURE count (272), not the EXAMPLE count (1000),
            # so roughly 70% of the training set was never used each epoch.
            while i < len(training_data):
                batch_x = np.array(training_data[i:i + batch_size])
                batch_y = np.array(training_target[i:i + batch_size])
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch, 'completed out of', hm_epochs,
                  'loss', epoch_loss)

        # NOTE(review): the reported "[770] vs. [1000]" shape error happens
        # here when test_target was (mistakenly) assigned the 1000 training
        # labels earlier in the file — make sure the one-hot step assigns
        # x_labels (the encoded TEST labels) to test_target.
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval(session=sess,
                                         feed_dict={x: test_data,
                                                    y: test_target}))

train_neural_network(x)
将tensorflow导入为tf
将 pandas 导入为 pd
将numpy作为np导入
从随机导入洗牌
training\u set=tf.contrib.learn.datasets.base.load\u csv\u不带\u头(
filename=“35.csv”,
target_dtype=np.int32,
特征(数据类型=np.32)
n_节点_hl1=500
n_节点_hl2=500
n_节点\u hl3=500
n_类=3
批量大小=100
数据=np.数组(训练集数据)
培训数据=数据[:1000]
#打印(np.形状(训练数据))
测试数据=数据[1000:]
#打印(np.形状(测试数据))
目标=np.数组(训练集目标)
培训目标=目标[:1000]
#打印(np.形状(训练目标))
测试目标=目标[1000:]
#打印(np.形状(测试目标))
y_标签=[]
对于我的培训目标:
如果i==0:
y_标签。追加([1,0,0])
elif i==1:
y_标签。追加([0,1,0])
其他:
y_标签。追加([0,0,1])
培训目标=y标签
#打印(目标)
x_标签=[]
对于测试_目标中的i:
如果i==0:
x_标签。追加([1,0,0])
elif i==1:
x_标签。追加([0,1,0])
其他:
x_标签。追加([0,0,1])
测试目标=y标签
x=tf.placeholder('float',[None,272])
y=tf.占位符('float')
def神经网络模型(数据):
hidden_1_layer={'weights':tf.Variable(tf.random_normal([272,n_nodes_hl1]),
“偏差”:tf.Variable(tf.random_normal([n_nodes_hl1]))
隐藏层=
{'weights':tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2]),
“偏差”:tf.Variable(tf.random_normal([n_nodes_hl2]))
隐藏3层=
{'weights':tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3]),
“偏差”:tf.Variable(tf.random_normal([n_nodes_hl3]))
输出层=
{'weights':tf.Variable(tf.random_normal([n_nodes_hl3,n_classes]),
“偏差”:tf.Variable(tf.random_normal([n_类]))
l1=tf.add(tf.matmul(数据,隐藏层['weights')),
隐藏的_1_层[“偏差])
l1=tf.nn.relu(l1)
l2=tf.add(tf.matmul(l1,隐藏的\u 2\u层['权重]),隐藏的\u 2\u层['偏差])
l2=tf.nn.relu(l2)
l3=tf.add(tf.matmul(l2,隐藏层['weights')),隐藏层['biases'))
l3=tf.nn.relu(l3)
输出=tf.matmul(l3,输出层['weights'])+输出层['biases']
返回输出
def系列神经网络(x):
预测=神经网络模型(x)
成本=tf.减少平均值(tf.nn.softmax\u交叉熵)
(logits=prediction,labels=y))
优化器=tf.train.AdamOptimizer()。最小化(成本)
hm_时代=20
使用tf.Session()作为sess:
sess.run(tf.global\u variables\u initializer())
对于范围内的历元(hm_历元):
历元损失=0
i=0

你能格式化你的错误信息吗?是的,我已经格式化了。可以吗?