Warning: file_get_contents(/data/phpspider/zhask/data//catemap/0/jpa/2.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Tensorflow 如何用tf.GradientTape模拟ReLU梯度_Tensorflow_Gradient_Derivative_Activation_Relu - Fatal编程技术网

Tensorflow 如何用tf.GradientTape模拟ReLU梯度

Tensorflow 如何用tf.GradientTape模拟ReLU梯度,tensorflow,gradient,derivative,activation,relu,Tensorflow,Gradient,Derivative,Activation,Relu,TensorFlow有一个名为梯度带的功能,类似于使用蒙特卡罗方法(?)获得梯度 我试着模拟ReLU的梯度,但这对X的负半部分不起作用 #colab or ipython reset %reset -f #libs import tensorflow as tf; #init tf.enable_eager_execution(); #code x = tf.convert_to_tensor([-3,-2,-1,0,1,2,3],dtype=tf.float32); with tf.G

TensorFlow有一个名为梯度带的功能,类似于使用蒙特卡罗方法(?)获得梯度

我试着模拟ReLU的梯度,但这对X的负半部分不起作用

#colab or ipython reset
%reset -f

#libs
import tensorflow as tf;

#init
tf.enable_eager_execution();

#code
x = tf.convert_to_tensor([-3,-2,-1,0,1,2,3],dtype=tf.float32);

with tf.GradientTape() as t:
  t.watch(x);
  y = fx = x; #THIS IS JUST THE POSITIVE HALF OF X

dy_dx = t.gradient(y,x);
print(dy_dx); 

我想我必须在
y = fx = x
这一行中更改一些内容来模拟 ReLU 函数,比如写一个
grad
函数,当 x 小于等于 0 时直接返回 0
,但我不知道这是否是建议的方法:

#ipython
%reset -f

#libs
import tensorflow as tf;
import numpy      as np;

#init
tf.enable_eager_execution();

#code
X = tf.convert_to_tensor([-3,-2,-1,0,1,2,3], dtype=tf.float32);

with tf.GradientTape() as T:
  T.watch(X);
  Y = Fx = X;
#end with

Dy_Dx = T.gradient(Y,X);
#print(Dy_Dx);

#get gradient of function Fx with conditional X
def grad(Y,At):
  if (At<=0): return 0;

  for I in range(len(X)):
    if X[I].numpy()==At:
      return Dy_Dx[I].numpy();
#end def

print(grad(Y,-3));
print(grad(Y,-2));
print(grad(Y,-1));
print(grad(Y,-0));
print(grad(Y,1));
print(grad(Y,2));
print(grad(Y,3));

print("\nDone.");
#eof
#ipython
%reset -f
#libs
import tensorflow as tf;
import numpy as np;
#init
tf.enable_eager_execution();
#code
X = tf.convert_to_tensor([-3,-2,-1,0,1,2,3], dtype=tf.float32);
with tf.GradientTape() as T:
  T.watch(X);
  Y = Fx = X;
#end with
Dy_Dx = T.gradient(Y,X);
#print(Dy_Dx);
#get gradient of function Fx with conditional X
def grad(Y,At):
  if (At<=0): return 0;
tf.Tensor([0. 0. 0. 0. 1. 1. 1.], shape=(7,), dtype=float32)
#ipython
%reset -f

#libs
import tensorflow as tf;
import numpy      as np;

#init
tf.enable_eager_execution();

#code
X = tf.convert_to_tensor([-3,-2,-1,0,1,2,3], dtype=tf.float32);

with tf.GradientTape() as T:
  T.watch(X);
  Y = Fx = X;
#end with

Dy_Dx = T.gradient(Y,X);
#print(Dy_Dx);

#get gradient of function Fx with conditional X
def grad(Y,At):
  if (At<=0): return 0;

  for I in range(len(X)):
    if X[I].numpy()==At:
      return Dy_Dx[I].numpy();
#end def

print(grad(Y,-3));
print(grad(Y,-2));
print(grad(Y,-1));
print(grad(Y,-0));
print(grad(Y,1));
print(grad(Y,2));
print(grad(Y,3));

print("\nDone.");
#eof