How does TensorFlow perform its computation in Python?

While I was learning about CNNs, I found a blog post like the one below.

// --- Forward propagation; inputData is the image data
void cnnff(CNN* cnn,float** inputData)
{
    int outSizeW=cnn->S2->inputWidth;
    int outSizeH=cnn->S2->inputHeight;
    int i,j,r,c;

    // --- First: convolution layer C1
    nSize mapSize={cnn->C1->mapSize,cnn->C1->mapSize};
    nSize inSize={cnn->C1->inputWidth,cnn->C1->inputHeight};
    nSize outSize={cnn->S2->inputWidth,cnn->S2->inputHeight};
    for(i=0;i<(cnn->C1->outChannels);i++){
        for(j=0;j<(cnn->C1->inChannels);j++){
            float** mapout=cov(cnn->C1->mapData[j][i],mapSize,inputData,inSize,valid);
            addmat(cnn->C1->v[i],cnn->C1->v[i],outSize,mapout,outSize);
            for(r=0;r<outSize.r;r++)
                free(mapout[r]);
            free(mapout);
        }
        for(r=0;r<outSize.r;r++)
            for(c=0;c<outSize.c;c++)
                cnn->C1->y[i][r][c]=activation_Sigma(cnn->C1->v[i][r][c],cnn->C1->basicData[i]);
    }

    // --- Second: pooling layer S2
    outSize.c=cnn->C3->inputWidth;
    outSize.r=cnn->C3->inputHeight;
    inSize.c=cnn->S2->inputWidth;
    inSize.r=cnn->S2->inputHeight;
    for(i=0;i<(cnn->S2->outChannels);i++){
        if(cnn->S2->poolType==AvePool)
            avgPooling(cnn->S2->y[i],outSize,cnn->C1->y[i],inSize,cnn->S2->mapSize);
    }
}
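
In NumPy terms, the C1 loop above accumulates one 'valid' 2-D convolution per (input channel, output map) kernel pair and then applies a sigmoid with a per-map bias. A minimal sketch of the same computation (the names are mine, not the blog's, and it assumes activation_Sigma(v, bias) is sigmoid(v + bias)):

import numpy as np

def valid_conv2d(img, kernel):
    # 2-D 'valid' cross-correlation of img (H, W) with kernel (f, f).
    H, W = img.shape
    f = kernel.shape[0]
    out = np.zeros((H - f + 1, W - f + 1))
    for r in range(H - f + 1):
        for c in range(W - f + 1):
            out[r, c] = np.sum(img[r:r + f, c:c + f] * kernel)
    return out

def c1_forward(input_img, kernels, biases):
    # kernels[j][i]: kernel from input channel j to output map i (cf. mapData[j][i]);
    # biases[i]: per-output-map bias (cf. basicData[i]).
    n_in, n_out = len(kernels), len(kernels[0])
    maps = []
    for i in range(n_out):
        v = sum(valid_conv2d(input_img, kernels[j][i]) for j in range(n_in))
        maps.append(1.0 / (1.0 + np.exp(-(v + biases[i]))))  # sigmoid(v + bias)
    return maps
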
The author implements the CNN in C, modeled on Matlab's DeepLearnToolbox CNN; the code above comes from that post.

When the function tf_session.TF_Run executes, it just returns the fetched values (loss, accuracy), but I cannot see how those values are actually computed.
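
For context, in a minimal graph-mode program only the fetched values ever cross back into Python; the arithmetic itself runs in the native runtime (a small sketch, assuming TF 1.x):

import tensorflow as tf  # TF 1.x graph mode assumed

x = tf.placeholder(tf.float32, shape=[None])
loss = tf.reduce_mean(tf.square(x))

with tf.Session() as sess:
    # The fetches passed to run() become TF_Run's fetch_list; only their
    # computed values come back to Python as NumPy scalars/arrays.
    loss_val = sess.run(loss, feed_dict={x: [1.0, 2.0, 3.0]})
    print(loss_val)  # ~4.6667, computed entirely outside Python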

Then I traced the TensorFlow code in C:\Users\xxx\AppData\Local\Continuum\Anaconda3\envs\tensorflow1\Lib\site-packages\tensorflow\python\pywrap_tensorflow_internal.py (together with the session code that calls it), and it looks like the following:

# From tensorflow/python/client/session.py:
def _run_fn(session, feed_dict, fetch_list, target_list, options,
            run_metadata):
  # Ensure any changes to the graph are reflected in the runtime.
  self._extend_graph()
  with errors.raise_exception_on_not_ok_status() as status:
    if self._created_with_new_api:
      return tf_session.TF_SessionRun_wrapper(
          session, options, feed_dict, fetch_list, target_list,
          run_metadata, status)
    else:
      return tf_session.TF_Run(session, options,
                               feed_dict, fetch_list, target_list,
                               status, run_metadata)

# From pywrap_tensorflow_internal.py:
def TF_Run(session, run_options, feed_dict, output_names, target_nodes, out_status, run_outputs):
    return _pywrap_tensorflow_internal.TF_Run(session, run_options, feed_dict, output_names, target_nodes, out_status, run_outputs)

TF_Run = _pywrap_tensorflow_internal.TF_Run

pywrap_tensorflow_internal.py in turn loads _pywrap_tensorflow_internal.pyd, so I think the part that actually computes the values is inside that .pyd. Where is the source code for this .pyd? It only seems to be available as a prebuilt binary via "pip install tensorflow".

A .pyd file is essentially a Windows dynamic library (a DLL that Python imports as an extension module).
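
The C++ sources it is compiled from live in the public TensorFlow repository (github.com/tensorflow/tensorflow); pip only ships the prebuilt binary. A quick way to see which compiled module your installation actually loaded (a sketch assuming the TF 1.x module layout, which varies across versions):

from tensorflow.python import pywrap_tensorflow_internal as wrapper

print(wrapper.__file__)  # the generated Python-side wrapper (.py)
# The wrapper keeps a reference to the compiled extension it loaded:
print(wrapper._pywrap_tensorflow_internal.__file__)  # the .pyd/.so on disk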

Maybe you need to learn how TensorFlow is compiled from scratch in order to understand everything about its .pyd ;)

I think what you mean is that you want to know how tf.nn.conv2d and the like are implemented. If you are new to TensorFlow, you will notice that there are layers functions (e.g. tf.layers.conv2d) and nn functions (e.g. tf.nn.conv2d). The layers functions are wrappers around nn, so if you just want to start implementing right away, ignore layers.

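As a small illustration of that split (a sketch assuming the TF 1.x API; the shapes and names here are my own): tf.layers.conv2d creates the filter variable for you, while tf.nn.conv2d expects you to pass the filter in yourself.

import tensorflow as tf  # TF 1.x assumed

x = tf.placeholder(tf.float32, [None, 28, 28, 1])

# High-level wrapper: the filter variable is created internally.
y_hi = tf.layers.conv2d(x, filters=8, kernel_size=4, padding='same')

# Low-level op: you create and pass the filter yourself.
W = tf.get_variable('W', shape=[4, 4, 1, 8])
y_lo = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
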
Now, if you read the documentation for tf.nn.conv2d, it says:

    Defined in generated file: tensorflow/python/ops/gen_nn_ops.py

For comparison, look at tf.nn.conv2d_transpose, whose documentation says:

    Defined in: tensorflow/python/ops/nn_ops.py

If you click tensorflow/python/ops/nn_ops.py there, it actually takes you to the file where tf.nn.conv2d_transpose is defined. For tf.nn.conv2d, the one you are interested in, no such link exists. That is because you can write ops in C++ and have TensorFlow generate the Python part, hence the text "Defined in generated file". The actual implementation is spread across three C++ source files.
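
You can verify the "generated file" part from a Python prompt; a minimal sketch (assuming the TF 1.x module layout, which varies across versions):

import inspect
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

print(tf.nn.conv2d.__module__)            # typically the generated module
print(inspect.getsourcefile(gen_nn_ops))  # path of the generated .py on disk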


    • I assume you are looking for how a CNN works internally. The code below shows how the conv operation is performed internally; it is equivalent to this TensorFlow API call (see the TensorFlow documentation for more detail on this particular op).

      Note: this is only the conv operation for the first layer of the CNN.

      Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
      
      import numpy as np

      def zero_pad(X, pad):
          """Pad the height/width axes of X (m, n_H, n_W, n_C) with zeros."""
          return np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant')

      def conv_single_step(a_slice_prev, W, b):
          """Apply one filter (W, b) to a single (f, f, n_C_prev) slice of the input."""
          return np.sum(a_slice_prev * W) + float(b)

      def conv_forward(A_prev, W, b, hparameters):
          """
          Implements the forward propagation for a convolution function

          Arguments:
          A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
          W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
          b -- Biases, numpy array of shape (1, 1, 1, n_C)
          hparameters -- python dictionary containing "stride" and "pad"

          Returns:
          Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
          """
          # Retrieve dimensions from A_prev's shape
          (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape

          # Retrieve dimensions from W's shape
          (f, f, n_C_prev, n_C) = W.shape

          # Retrieve information from "hparameters"
          stride = hparameters['stride']
          pad = hparameters['pad']

          # Compute the dimensions of the CONV output volume (floor division)
          n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
          n_W = int((n_W_prev - f + 2 * pad) / stride) + 1

          # Initialize the output volume Z with zeros
          Z = np.zeros((m, n_H, n_W, n_C))

          # Create A_prev_pad by padding A_prev
          A_prev_pad = zero_pad(A_prev, pad)

          for i in range(m):                         # loop over the batch of training examples
              a_prev_pad = A_prev_pad[i]             # ith training example's padded activation
              for h in range(n_H):                   # loop over vertical axis of the output volume
                  for w in range(n_W):               # loop over horizontal axis of the output volume
                      for c in range(n_C):           # loop over channels (= #filters) of the output

                          # Find the corners of the current "slice"
                          vert_start = h * stride
                          vert_end = vert_start + f
                          horiz_start = w * stride
                          horiz_end = horiz_start + f

                          # Use the corners to define the (3D) slice of a_prev_pad
                          a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]

                          # Convolve the slice with filter W[..., c] and bias b[..., c]
                          Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:, :, :, c], b[:, :, :, c])

          return Z
      
      
      A_prev = np.random.randn(1, 64, 64, 3)
      W = np.random.randn(4, 4, 3, 8)
      # Don't worry about the bias; tensorflow will take care of this.
      b = np.random.randn(1, 1, 1, 8)
      hparameters = {"pad": 1,
                     "stride": 1}

      Z = conv_forward(A_prev, W, b, hparameters)
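
      To sanity-check the loop against TensorFlow itself, pad explicitly and compare with tf.nn.conv2d using 'VALID' padding and a zero bias, since tf.nn.conv2d adds no bias (a sketch assuming TF 1.x):

      import numpy as np
      import tensorflow as tf  # TF 1.x assumed

      A_prev = np.random.randn(1, 64, 64, 3).astype(np.float32)
      W = np.random.randn(4, 4, 3, 8).astype(np.float32)
      b = np.zeros((1, 1, 1, 8), dtype=np.float32)  # zero bias for a fair comparison

      Z_np = conv_forward(A_prev, W, b, {"pad": 1, "stride": 1})

      # Pad by hand so both paths see identical borders, then use 'VALID'.
      A_pad = np.pad(A_prev, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='constant')
      with tf.Session() as sess:
          Z_tf = sess.run(tf.nn.conv2d(A_pad, W, strides=[1, 1, 1, 1], padding='VALID'))

      print(np.allclose(Z_np, Z_tf, atol=1e-4))  # expected: True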