Python ValueError:变量<;tf.变量';张力仪/基本参数/可训练浮动32:0';形状=(1,)数据类型=浮动32>;对渐变有'None'

Python ValueError:变量<;tf.变量';张力仪/基本参数/可训练浮动32:0';形状=(1,)数据类型=浮动32>;对渐变有'None',python,python-3.x,tensorflow,neural-network,Python,Python 3.x,Tensorflow,Neural Network,我正试图用nengo和tensorflow,以及SNN模型来实现低出生率的预测\ 但是,我得到了以下值错误(在Anaconda中): 系统: python:3.7.7 tensorflow:2.2.0和2.1.0 嫩戈:3.0.0 nengo dl:3.2.0 我怎样才能解决这个问题 提前谢谢 Traceback (most recent call last): File "C:\Users\USER\NengoPRJ\nengo_lowbirth.py", line 9

我正试图用nengo和tensorflow,以及SNN模型来实现低出生率的预测\

但是,我得到了以下值错误(在Anaconda中):

系统:

python:3.7.7
tensorflow:2.2.0和2.1.0
嫩戈:3.0.0
nengo dl:3.2.0

我怎样才能解决这个问题

提前谢谢

Traceback (most recent call last):
  File "C:\Users\USER\NengoPRJ\nengo_lowbirth.py", line 95, in <module>
    sim.fit(train_data, {out_p: train_labels}, epochs=epochs)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\nengo\utils\magic.py", line 181, in __call__
    return self.wrapper(self.__wrapped__, self.instance, args, kwargs)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\nengo_dl\simulator.py", line 66, in require_open
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\nengo_dl\simulator.py", line 869, in fit
    "fit", x=x, y=y, n_steps=n_steps, stateful=stateful, **kwargs
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\nengo\utils\magic.py", line 181, in __call__
    return self.wrapper(self.__wrapped__, self.instance, args, kwargs)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\nengo_dl\simulator.py", line 50, in with_self
    output = wrapped(*args, **kwargs)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\nengo_dl\simulator.py", line 1032, in _call_keras
    outputs = getattr(self.keras_model, func_type)(**func_args)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 819, in fit
    use_multiprocessing=use_multiprocessing)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\engine\training_arrays.py", line 680, in fit
    steps_name='steps_per_epoch')
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\engine\training_arrays.py", line 189, in model_iteration
    f = _make_execution_function(model, mode)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\engine\training_arrays.py", line 571, in _make_execution_function
    return model._make_execution_function(mode)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2125, in _make_execution_function
    self._make_train_function()
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2057, in _make_train_function
    params=self._collected_trainable_weights, loss=self.total_loss)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\optimizer_v2\optimizer_v2.py", line 503, in get_updates
    grads = self.get_gradients(loss, params)
  File "C:\ProgramData\Anaconda3\envs\tf210\lib\site-packages\tensorflow_core\python\keras\optimizer_v2\optimizer_v2.py", line 397, in get_gradients
    "K.argmax, K.round, K.eval.".format(param))
ValueError: Variable <tf.Variable 'TensorGraph/base_params/trainable_float32_1:0' shape=(1,) dtype=float32> has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-4-205839bf5640> in <module>()
     96         loss={out_p: tf.losses.SparseCategoricalCrossentropy(from_logits=True)})
     97 
---> 98     sim.fit(train_data, {out_p: train_labels}, epochs=epochs)
     99 
    100 

13 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in get_gradients(self, loss, params)
    467                            "gradient defined (i.e. are differentiable). "
    468                            "Common ops without gradient: "
--> 469                            "K.argmax, K.round, K.eval.".format(param))
    470       grads = self._clip_gradients(grads)
    471     return grads

ValueError: Variable <tf.Variable 'TensorGraph/base_params/trainable_float32_1:0' shape=(1,) dtype=float32> has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import random
import nengo
import nengo_dl
import requests

seed = 1
amp =1
max_rates = 100
intercepts = 0
tau_rc = 0.02
noise_filter = 0.1 #noise_filter

train_data_rate = 0.85
learning_rate = 0.001
epochs = 5
np.random.seed(seed)

do_train = True

url = "https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat"
birth_file = requests.get(url)
birth_all_data = birth_file.text.split('\r\n')
birth_header = [x for x in birth_all_data[0].split('\t') if len(x)>=1]
birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_all_data[1:] if len(y)>=1]
data_size = len(birth_data)

x_data = np.array([x[1:8] for x in birth_data]) 
y_data = np.array([y[0] for y in birth_data])

train_samples = round(data_size*train_data_rate)
train_indices = np.random.choice(data_size, train_samples, replace=False)
testset = set(range(data_size)) -  set(train_indices)
test_indices = np.array(list(testset))

x_train = x_data[train_indices]
y_train = np.transpose([y_data[train_indices]])

x_test = x_data[test_indices]
y_test = np.transpose([y_data[test_indices]])

def normalize_cols(m):
    col_max = m.max(axis=0)     
    col_min = m.min(axis=0)
    return (m - col_min) / (col_max - col_min)

x_train = np.nan_to_num(normalize_cols(x_train))
x_test = np.nan_to_num(normalize_cols(x_test))

##################################################
nfeatures = 7
#minibatch_size = 189 - train_samples
minibatch_size=1

with nengo.Network(seed=seed) as net:
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rates])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([intercepts])    
    neuron_type=nengo.LIF(amplitude=amp, tau_rc=tau_rc)

    nengo_dl.configure_settings(stateful=False)
        
    inp = nengo.Node([0] * nfeatures)    
    ens = nengo.Ensemble(1, 1, neuron_type=neuron_type)
    x = nengo.Connection(inp, ens.neurons, transform=nengo_dl.dists.Glorot(), synapse=None) 
    
    inp_p = nengo.Probe(inp)
    out_p = nengo.Probe(x, label="out_p")
    out_p_filt = nengo.Probe(x, synapse=noise_filter, label="out_p_filt")

sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)

n_steps=20

train_data = np.reshape(x_train, (x_train.shape[0], 1, nfeatures))
train_labels = np.reshape(y_train, (y_train.shape[0], 1, 1))

test_data = np.tile(np.reshape(x_test, (x_test.shape[0], 1, nfeatures)), (1, n_steps, 1))
test_labels = np.tile(np.reshape(y_test, (y_test.shape[0], 1, 1)), (1, n_steps, 1))


def accuracy(outputs, targets):    
    return 100 * tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(outputs)), targets), tf.float32))

sim.compile(loss={out_p_filt: accuracy})
print("accuracy before training:", sim.evaluate(test_data, {out_p_filt: test_labels}, verbose=0)["loss"])

do_training = do_train;
if do_training:

    sim.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
        loss={out_p: tf.losses.SparseCategoricalCrossentropy(from_logits=True)})

    sim.fit(train_data, {out_p: train_labels}, epochs=epochs)