Python 3.x Python:在使用TensorFlow的神经网络中,成本不断增加

Python 3.x Python:在使用TensorFlow的神经网络中,成本不断增加,python-3.x,tensorflow,neural-network,Python 3.x,Tensorflow,Neural Network,我试图用TensorFlow创建一个神经网络,但我的成本一直在增加。 这是我目前的代码: class AI_core: def __init__(self, nodes_in_each_layer): self.data_in_placeholder = tf.placeholder("float", [None, nodes_in_each_layer[0]]) self.data_out_placeholder = tf.placeholder("fl

我试图用TensorFlow创建一个神经网络,但我的成本一直在增加。 这是我目前的代码:

class AI_core:
    def __init__(self, nodes_in_each_layer):
        self.data_in_placeholder = tf.placeholder("float", [None, nodes_in_each_layer[0]])
        self.data_out_placeholder = tf.placeholder("float")
        self.init_neural_network(nodes_in_each_layer)

    def init_neural_network(self, n_nodes_h):
        #n_nodes_h contains the number of nodes for each layer
        #n_nodes_h[0] = number of inputs
        #n_nodes_h[-1] = number of outputs
        self.layers = [None for i in range(len(n_nodes_h)-1)]
        for i in range(1, len(n_nodes_h)):
            self.layers[i-1] = {"weights":tf.Variable(tf.random_normal([n_nodes_h[i-1], n_nodes_h[i]])),
            "biases":tf.Variable(tf.random_normal([n_nodes_h[i]]))}

    def neural_network_model(self, data):
        for i in range(len(self.layers)):
            data = tf.matmul(data, self.layers[i]["weights"]) + self.layers[i]["biases"]
            if i != len(self.layers):
                data = tf.nn.relu(data)
        return data

    def train_neural_network(self, data):
        prediction = self.neural_network_model(self.data_in_placeholder)
        cost = tf.reduce_mean(tf.square(self.data_out_placeholder-prediction))
        optimiser = tf.train.GradientDescentOptimizer(learning_rate=0.0001).minimize(cost)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            epoch_loss = 0
            for _ in range(int(data.length)):
                epoch_x, epoch_y = data.next_batch()
                c = sess.run(cost, feed_dict={self.data_in_placeholder: epoch_x, self.data_out_placeholder: epoch_y})
                _ = sess.run(optimiser, feed_dict={self.data_in_placeholder: epoch_x, self.data_out_placeholder: epoch_y})
                epoch_loss += np.sum(c)

                print("loss =", epoch_loss)
现在,我正试图使网络近似于math.sin函数。 我已经将每个层中的节点设置为[1,5,5,1],批处理大小设置为3。这是输出:

loss = 0.8417138457298279
loss = 1.190976768732071
loss = 1.8150676786899567
loss = 2.433938592672348
loss = 3.092040628194809
loss = 3.478498786687851
loss = 3.7894928753376007
loss = 4.598285228013992
loss = 5.418278068304062
loss = 5.555390268564224

看起来您一直在增加以前迭代中的损失值

with tf.Session() as sess: sess.run(tf.global_variables_initializer()) epoch_loss = 0 for _ in range(int(data.length)): epoch_x, epoch_y = data.next_batch() c = sess.run(cost, feed_dict={self.data_in_placeholder: epoch_x, self.data_out_placeholder: epoch_y}) _ = sess.run(optimiser, feed_dict={self.data_in_placeholder: epoch_x, self.data_out_placeholder: epoch_y}) epoch_loss += np.sum(c) print("loss =", epoch_loss) 使用tf.Session()作为sess: sess.run(tf.global\u variables\u initializer()) 历元损失=0 对于范围内的(int(data.length)): 历元x,历元y=data.next\u batch() c=sess.run(cost,feed\u dict={self.data\u in\u占位符:epoch\u x,self.data\u out\u占位符:epoch\u y}) _=sess.run(Optimizer,feed_dict={self.data_in_占位符:epoch_x,self.data_out_占位符:epoch_y}) 历元损失+=np.和(c) 打印(“损失=,历元损失)
看起来您一直在增加以前迭代中的损失值

with tf.Session() as sess: sess.run(tf.global_variables_initializer()) epoch_loss = 0 for _ in range(int(data.length)): epoch_x, epoch_y = data.next_batch() c = sess.run(cost, feed_dict={self.data_in_placeholder: epoch_x, self.data_out_placeholder: epoch_y}) _ = sess.run(optimiser, feed_dict={self.data_in_placeholder: epoch_x, self.data_out_placeholder: epoch_y}) epoch_loss += np.sum(c) print("loss =", epoch_loss) 使用tf.Session()作为sess: sess.run(tf.global\u variables\u initializer()) 历元损失=0 对于范围内的(int(data.length)): 历元x,历元y=data.next\u batch() c=sess.run(cost,feed\u dict={self.data\u in\u占位符:epoch\u x,self.data\u out\u占位符:epoch\u y}) _=sess.run(Optimizer,feed_dict={self.data_in_占位符:epoch_x,self.data_out_占位符:epoch_y}) 历元损失+=np.和(c) 打印(“损失=,历元损失)