Python: How can I use TensorBoard (in Google Colab) without Keras (just plain TensorFlow 1)? (code provided)

I want to visualize the TensorFlow 1.x code below with TensorBoard in Google Colab. The problem is that every example and tutorial I can find uses Keras, and I don't know enough TensorFlow to translate "add callbacks=[tensorboard_callback] to model.fit() in Keras" into something that works for my code.

The code is a method for solving a differential equation with a neural network, and it is based on this.

The code I am using does not actually use Keras anywhere, and I am not sure whether there is some other way to get TensorBoard working for me. I am no expert in TensorFlow/Keras, so I would appreciate a worked example. Thanks, everyone!
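
From what I can tell, plain TF1 logs to TensorBoard through the tf.summary API rather than a Keras callback. The sketch below is my rough understanding of that workflow, wired against the cost, init, training_op, and num_iter defined in the code further down; the logs/ directory name and the per-step logging loop are my own assumptions, not something taken from a working example:

# Minimal sketch of the TF1 (non-Keras) TensorBoard workflow.
# ASSUMPTION: 'logs/' is a hypothetical log directory; adapt as needed.
cost_summary = tf.summary.scalar('cost', cost)       # scalar summary of the cost
merged = tf.summary.merge_all()                      # one op that evaluates all summaries
writer = tf.summary.FileWriter('logs/', tf.get_default_graph())  # also saves the graph

with tf.Session() as sess:
    init.run()
    for i in range(num_iter):
        _, summary = sess.run([training_op, merged])
        writer.add_summary(summary, i)               # record the cost at step i
writer.close()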

To make this easier to answer, I have also put the code in this notebook:

First, the network is initialized:

import sys
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import matplotlib.pyplot as plt

def init_network(diffEqn_string, g_trial_string, g_analytic_string, param, param_tfconst_str, num_iter, num_hidden_neurons, learning_rate, t, activation_fn = tf.nn.sigmoid):

    # Just to reset the graph such that it is possible to rerun this in a
    # Jupyter cell without resetting the whole kernel.
    tf.reset_default_graph()

    # Set a seed to ensure getting the same results from every run
    tf.set_random_seed(4155)

    # Convert the values the trial solution is evaluated at to a tensor.
    t_tf = tf.convert_to_tensor(t.reshape(-1,1),dtype=tf.float64)
    zeros = tf.reshape(tf.convert_to_tensor(np.zeros(t.shape)),shape=(-1,1))
    
    # Define the parameters of the equation
    param = eval(param_tfconst_str)
            
    num_hidden_layers = np.size(num_hidden_neurons)
    
    # Construct the network.
    # tf.name_scope is used to group each step in the construction,
    # just for a more organized visualization in TensorBoard
    with tf.name_scope('dnn'):

        # Input layer
        previous_layer = t_tf

        # Hidden layers
        for l in range(num_hidden_layers):
            current_layer = tf.layers.dense(previous_layer, num_hidden_neurons[l], name='hidden%d'%(l+1), activation=activation_fn)
            previous_layer = current_layer

        # Output layer
        dnn_output = tf.layers.dense(previous_layer, 1, name='output')


    # Define the cost function such that d_g_trial = alpha*g_trial*(A - g_trial)
    with tf.name_scope('cost'):
        g_trial = eval(g_trial_string) #g0 + t_tf*dnn_output
        d_g_trial = tf.gradients(g_trial,t_tf)

        func = eval(diffEqn_string)
        
        # The first argument is the target labels for training. We want the
        # mean squared error to be zero for all inputs.
        cost = tf.losses.mean_squared_error(zeros, func) 

    
    # Choose the method to minimize the cost function, along with a learning rate

    with tf.name_scope('train'):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        training_op = optimizer.minimize(cost)


    # Define a node that initializes all of the other nodes in the computational graph
    # used by TensorFlow:
    init = tf.global_variables_initializer()
    return init, cost, g_trial, training_op
Then comes run_session():
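
The post leaves out the body of run_session() here; the following is a minimal sketch of what it would have to look like, reconstructed purely from how it is called below, so the training loop and variable names are assumptions:

def run_session(num_iter, init, cost, g_trial, training_op):
    # Hypothetical reconstruction: train for num_iter steps, record the cost
    # after each step, and return the trial solution from the trained network.
    costs_list = []
    with tf.Session() as sess:
        init.run()
        for i in range(num_iter):
            sess.run(training_op)
            costs_list.append(cost.eval())
        g_dnn_tf = g_trial.eval()
    return g_dnn_tf, costs_list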

Then the code is run and the cost is plotted over the iterations:

param = np.array([1, 1])
param_tfconst_str = "[tf.constant(param[0],dtype=tf.float64), tf.constant(param[1],dtype=tf.float64)]"
diffEqn_string = "d_g_trial[0] - param[1]*g_trial"
g_trial_string = "param[0] + t_tf*dnn_output"
g_analytic_string = "param[0]*tf.math.exp(param[1]*t)"
description = "f'=1*f; f(0)=1; sigmoid"
activation_fn = tf.nn.sigmoid

num_iter = 100

num_hidden_neurons = [4]
learning_rate = 0.1

Nt = 10
T0 = -3
T = 3
t = np.linspace(T0,T, Nt)

init, cost, g_trial, training_op = init_network(diffEqn_string, g_trial_string, g_analytic_string, param, param_tfconst_str, num_iter, num_hidden_neurons, learning_rate, t, activation_fn)

g_dnn_tf, costs_list = run_session(num_iter, init, cost, g_trial, training_op)

plt.plot(costs_list)
plt.yscale("log") 
plt.xscale("log")