
Placeholder size and type error in TensorFlow when combining a CNN and an LSTM

Tags: tensorflow, deep-learning, lstm, convolutional-neural-network

I am combining a CNN and an LSTM using the following code:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import pyfftw
from scipy import signal
import xlrd
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
import seaborn as sns

from sklearn.metrics import confusion_matrix

##matplotlib inline
plt.style.use('ggplot')


## define functions
def read_data(file_path):
##    column_names = ['user-id','activity','timestamp', 'x-axis', 'y-axis', 'z-axis']
    column_names = ['activity','timestamp', 'Ax', 'Ay', 'Az', 'Lx', 'Ly', 'Lz', 'Gx', 'Gy', 'Gz', 'Mx', 'My', 'Mz'] ## 3 sensors
    data = pd.read_csv(file_path,header = None, names = column_names)
    return data

def feature_normalize(dataset):
    mu = np.mean(dataset,axis = 0)
    sigma = np.std(dataset,axis = 0)
    return (dataset - mu)/sigma

def plot_axis(ax, x, y, title):
    ax.plot(x, y)
    ax.set_title(title)
    ax.xaxis.set_visible(False)
    ax.set_ylim([min(y) - np.std(y), max(y) + np.std(y)])
    ax.set_xlim([min(x), max(x)])
    ax.grid(True)

def plot_activity(activity,data):
    fig, (ax0, ax1, ax2) = plt.subplots(nrows = 3, figsize = (15, 10), sharex = True)
    plot_axis(ax0, data['timestamp'], data['Ax'], 'x-axis')
    plot_axis(ax1, data['timestamp'], data['Ay'], 'y-axis')
    plot_axis(ax2, data['timestamp'], data['Az'], 'z-axis')
    plt.subplots_adjust(hspace=0.2)
    fig.suptitle(activity)
    plt.subplots_adjust(top=0.90)
    plt.show()

def windows(data, size):
    start = 0
    while start < data.count():
        yield start, start + size
        start += (size / 2)
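        # note: "size / 2" is true division here (division is imported from
        # __future__ above), so the yielded window bounds are floats;
        # segment_signal() casts them back to int before slicing.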

def segment_signal(data, window_size = None, num_channels=None): # edited
    segments = np.empty((0,window_size,num_channels)) #change from 3 to 9 channels for AGM fusion #use variable num_channels=9
    labels = np.empty((0))
    for (n_start, n_end) in windows(data['timestamp'], window_size):
##        x = data["x-axis"][start:end]
##        y = data["y-axis"][start:end]
##        z = data["z-axis"][start:end]
        n_start = int(n_start)
        n_end = int(n_end)
        Ax = data["Ax"][n_start:n_end]
        Ay = data["Ay"][n_start:n_end]
        Az = data["Az"][n_start:n_end]
        Lx = data["Lx"][n_start:n_end]
        Ly = data["Ly"][n_start:n_end]
        Lz = data["Lz"][n_start:n_end]
        Gx = data["Gx"][n_start:n_end]
        Gy = data["Gy"][n_start:n_end]
        Gz = data["Gz"][n_start:n_end]
        Mx = data["Mx"][n_start:n_end]
        My = data["My"][n_start:n_end]
        Mz = data["Mz"][n_start:n_end]
        if(len(data['timestamp'][n_start:n_end]) == window_size): # include only full windows of length window_size
            segments = np.vstack([segments,np.dstack([Ax,Ay,Az,Gx,Gy,Gz,Mx,My,Mz])])
            labels = np.append(labels,stats.mode(data["activity"][n_start:n_end])[0][0])
    return segments, labels
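# note: segment_signal also slices the linear-acceleration columns (Lx, Ly, Lz)
# but only stacks the accelerometer, gyroscope and magnetometer columns, so each
# segment carries window_size x 9 values, matching num_channels = 9 below.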

def weight_variable(shape, restore_name):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial, name=restore_name)

def bias_variable(shape, restore_name):
    initial = tf.constant(0.0, shape = shape)
    return tf.Variable(initial, name=restore_name)

def depthwise_conv2d(x, W):
    return tf.nn.depthwise_conv2d(x,W, [1, 1, 1, 1], padding='VALID')
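# note: tf.nn.depthwise_conv2d takes a filter of shape
# [height, width, in_channels, channel_multiplier] and produces
# in_channels * channel_multiplier output channels, which is why the weight and
# bias shapes further down scale with depth * num_channels.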

def apply_depthwise_conv(x,weights,biases):
    return tf.nn.relu(tf.add(depthwise_conv2d(x, weights),biases))

def apply_max_pool(x,kernel_size,stride_size):
    return tf.nn.max_pool(x, ksize=[1, 1, kernel_size, 1], 
                          strides=[1, 1, stride_size, 1], padding='VALID') 

#------------------------get dataset----------------------#

## run shoaib_dataset.py to generate dataset_shoaib_total.txt

## get data from dataset_shoaib_total.txt
dataset_belt = read_data('dataset_shoaibsensoractivity_participant_belt.txt')
dataset_left_pocket = read_data('dataset_shoaibsensoractivity_participant_left_pocket.txt')
dataset_right_pocket = read_data('dataset_shoaibsensoractivity_participant_right_pocket.txt')
dataset_upper_arm = read_data('dataset_shoaibsensoractivity_participant_upper_arm.txt')
dataset_wrist = read_data('dataset_shoaibsensoractivity_participant_wrist.txt')



#--------------------preprocessing------------------------#

dataset_belt['Ax'] = feature_normalize(dataset_belt['Ax'])
dataset_belt['Ay'] = feature_normalize(dataset_belt['Ay'])
dataset_belt['Az'] = feature_normalize(dataset_belt['Az'])
dataset_belt['Gx'] = feature_normalize(dataset_belt['Gx'])
dataset_belt['Gy'] = feature_normalize(dataset_belt['Gy'])
dataset_belt['Gz'] = feature_normalize(dataset_belt['Gz'])
dataset_belt['Mx'] = feature_normalize(dataset_belt['Mx'])
dataset_belt['My'] = feature_normalize(dataset_belt['My'])
dataset_belt['Mz'] = feature_normalize(dataset_belt['Mz'])

dataset_left_pocket['Ax'] = feature_normalize(dataset_left_pocket['Ax'])
dataset_left_pocket['Ay'] = feature_normalize(dataset_left_pocket['Ay'])
dataset_left_pocket['Az'] = feature_normalize(dataset_left_pocket['Az'])
dataset_left_pocket['Gx'] = feature_normalize(dataset_left_pocket['Gx'])
dataset_left_pocket['Gy'] = feature_normalize(dataset_left_pocket['Gy'])
dataset_left_pocket['Gz'] = feature_normalize(dataset_left_pocket['Gz'])
dataset_left_pocket['Mx'] = feature_normalize(dataset_left_pocket['Mx'])
dataset_left_pocket['My'] = feature_normalize(dataset_left_pocket['My'])
dataset_left_pocket['Mz'] = feature_normalize(dataset_left_pocket['Mz'])

dataset_right_pocket['Ax'] = feature_normalize(dataset_right_pocket['Ax'])
dataset_right_pocket['Ay'] = feature_normalize(dataset_right_pocket['Ay'])
dataset_right_pocket['Az'] = feature_normalize(dataset_right_pocket['Az'])
dataset_right_pocket['Gx'] = feature_normalize(dataset_right_pocket['Gx'])
dataset_right_pocket['Gy'] = feature_normalize(dataset_right_pocket['Gy'])
dataset_right_pocket['Gz'] = feature_normalize(dataset_right_pocket['Gz'])
dataset_right_pocket['Mx'] = feature_normalize(dataset_right_pocket['Mx'])
dataset_right_pocket['My'] = feature_normalize(dataset_right_pocket['My'])
dataset_right_pocket['Mz'] = feature_normalize(dataset_right_pocket['Mz'])

dataset_upper_arm['Ax'] = feature_normalize(dataset_upper_arm['Ax'])
dataset_upper_arm['Ay'] = feature_normalize(dataset_upper_arm['Ay'])
dataset_upper_arm['Az'] = feature_normalize(dataset_upper_arm['Az'])
dataset_upper_arm['Gx'] = feature_normalize(dataset_upper_arm['Gx'])
dataset_upper_arm['Gy'] = feature_normalize(dataset_upper_arm['Gy'])
dataset_upper_arm['Gz'] = feature_normalize(dataset_upper_arm['Gz'])
dataset_upper_arm['Mx'] = feature_normalize(dataset_upper_arm['Mx'])
dataset_upper_arm['My'] = feature_normalize(dataset_upper_arm['My'])
dataset_upper_arm['Mz'] = feature_normalize(dataset_upper_arm['Mz'])


dataset_wrist['Ax'] = feature_normalize(dataset_wrist['Ax'])
dataset_wrist['Ay'] = feature_normalize(dataset_wrist['Ay'])
dataset_wrist['Az'] = feature_normalize(dataset_wrist['Az'])
dataset_wrist['Gx'] = feature_normalize(dataset_wrist['Gx'])
dataset_wrist['Gy'] = feature_normalize(dataset_wrist['Gy'])
dataset_wrist['Gz'] = feature_normalize(dataset_wrist['Gz'])
dataset_wrist['Mx'] = feature_normalize(dataset_wrist['Mx'])
dataset_wrist['My'] = feature_normalize(dataset_wrist['My'])
dataset_wrist['Mz'] = feature_normalize(dataset_wrist['Mz'])


#------------------fixed hyperparameters--------------------#

window_size = 200 #from 90 #FIXED at 4 seconds


#----------------input hyperparameters------------------#

input_height = 1
input_width = window_size
num_labels = 7
num_channels = 9 #from 3 channels #9 channels for AGM


#-------------------sliding time window----------------#

segments_belt, labels_belt = segment_signal(dataset_belt, window_size=window_size, num_channels=num_channels)
labels_belt = np.asarray(pd.get_dummies(labels_belt), dtype = np.int8)
reshaped_segments_belt = segments_belt.reshape(len(segments_belt), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_left_pocket, labels_left_pocket = segment_signal(dataset_left_pocket, window_size=window_size, num_channels=num_channels)
labels_left_pocket = np.asarray(pd.get_dummies(labels_left_pocket), dtype = np.int8)
reshaped_segments_left_pocket = segments_left_pocket.reshape(len(segments_left_pocket), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_right_pocket, labels_right_pocket = segment_signal(dataset_right_pocket, window_size=window_size, num_channels=num_channels)
labels_right_pocket = np.asarray(pd.get_dummies(labels_right_pocket), dtype = np.int8)
reshaped_segments_right_pocket = segments_right_pocket.reshape(len(segments_right_pocket), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_upper_arm, labels_upper_arm = segment_signal(dataset_upper_arm, window_size=window_size, num_channels=num_channels)
labels_upper_arm = np.asarray(pd.get_dummies(labels_upper_arm), dtype = np.int8)
reshaped_segments_upper_arm = segments_upper_arm.reshape(len(segments_upper_arm), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_wrist, labels_wrist = segment_signal(dataset_wrist, window_size=window_size, num_channels=num_channels)
labels_wrist = np.asarray(pd.get_dummies(labels_wrist), dtype = np.int8)
reshaped_segments_wrist = segments_wrist.reshape(len(segments_wrist), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels



##reshaped_segments = np.vstack([reshaped_segments1,reshaped_segments2,reshaped_segments3,reshaped_segments4,reshaped_segments5,reshaped_segments6,reshaped_segments7,reshaped_segments8,reshaped_segments9,reshaped_segments10])
##labels = np.vstack([labels1,labels2,labels3,labels4,labels5,labels6,labels7,labels8,labels9,labels10])



# all locations
reshaped_segments = np.vstack([reshaped_segments_belt,reshaped_segments_left_pocket,reshaped_segments_right_pocket,reshaped_segments_upper_arm,reshaped_segments_wrist])
labels = np.vstack([labels_belt,labels_left_pocket,labels_right_pocket,labels_upper_arm,labels_wrist]) 


#------------divide data into test and training set-----------#

train_test_split = np.random.rand(len(reshaped_segments)) < 0.70
train_x = reshaped_segments[train_test_split]
train_y = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]



#---------------training hyperparameters----------------#

batch_size = 10
kernel_size = 60 #from 60 #optimal 2
depth = 15 #from 60 #optimal 15
num_hidden = 1000 #from 1000 #optimal 80

learning_rate = 0.0001
training_epochs = 8


total_batches = train_x.shape[0] ##// batch_size # included // batch_size



#---------define placeholders for input----------#

X = tf.placeholder(tf.float32, shape=[None,input_width * num_channels], name="input")
X_reshaped = tf.reshape(X,[-1,input_height,input_width,num_channels])
Y = tf.placeholder(tf.float32, shape=[None,num_labels])
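# With the hyperparameters above, X takes flattened windows of shape
# [batch, window_size * num_channels] = [batch, 1800] and is reshaped to the
# NHWC layout [batch, 1, 200, 9]; Y takes one-hot labels of shape [batch, 7].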


#---------------------perform convolution-----------------#

# first convolutional layer 
c_weights = weight_variable([1, kernel_size, num_channels, depth], restore_name="c_weights")
c_biases = bias_variable([depth * num_channels], restore_name="c_biases")

c = apply_depthwise_conv(X_reshaped,c_weights,c_biases)
p = apply_max_pool(c,20,2)

# second convolutional layer
c2_weights = weight_variable([1, 6,depth*num_channels,depth//10], restore_name="c2_weights")
c2_biases = bias_variable([(depth*num_channels)*(depth//10)], restore_name="c2_biases")

c2 = apply_depthwise_conv(p,c2_weights,c2_biases)
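# Rough shape bookkeeping, assuming the hyperparameters above (window_size = 200,
# num_channels = 9, kernel_size = 60, depth = 15):
#   c  : [batch, 1, 141, 135]  (200 - 60 + 1 = 141 wide, 9 * 15 = 135 channels)
#   p  : [batch, 1,  61, 135]  (VALID max-pool, kernel 20, stride 2)
#   c2 : [batch, 1,  56, 135]  (61 - 6 + 1 = 56 wide, channel multiplier depth//10 = 1)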


n_classes = 7
n_hidden = 128
n_inputs = 540 # 540 = 60 * 9 (9 channels instead of 3, so not 180)
lstm_size = 128

rnnW = {
    'hidden': tf.Variable(tf.random_normal([n_inputs, n_hidden])),
    'output': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}

rnnBiases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden], mean=1.0)),
    'output': tf.Variable(tf.random_normal([n_classes]))
}

c2Reshape = tf.reshape(c2, [-1, 7, 200])
shuff = tf.transpose(c2Reshape, [1, 0, 2])
shuff = tf.reshape(shuff, [-1, n_inputs])
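# If the bookkeeping above is right, each example in c2 holds 1 * 56 * 135 = 7560
# values, so reshaping to [-1, 7, 200] only divides evenly for certain batch
# sizes (batch_size = 10 gives a leading dimension of 54), and that leading
# dimension no longer matches the batch size fed into X.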

# Linear activation, projecting the inputs to the LSTM's hidden size:
hidden = tf.nn.relu(tf.matmul(
    shuff, rnnW['hidden']
) + rnnBiases['hidden'])

# Split the series because the rnn cell needs time_steps tensors, each of shape [batch, n_hidden]:
hidden = tf.split(axis=0, num_or_size_splits=7, value=hidden)

lstm_cell = tf.contrib.rnn.LSTMCell(lstm_size, forget_bias=1.0)
# Stack two LSTM layers; both layers have the same shape
lstm_layers = tf.contrib.rnn.MultiRNNCell([lstm_cell] * 2)

lstmOutputs, _ = tf.contrib.rnn.static_rnn(lstm_layers, hidden, dtype=tf.float32)
lstmLastOutput = lstmOutputs[-1]
y_ = tf.matmul(lstmLastOutput, rnnW['output']) + rnnBiases['output']
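# Note: the leading dimension of lstmLastOutput (and hence y_) is set by the
# reshape/split above rather than by the batch fed into X, while Y stays
# [batch, num_labels]; this mismatch is presumably what surfaces as the
# placeholder size/type error mentioned in the title.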





#-----------------loss optimization-------------#

loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
##optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)


#-----------------compute accuracy---------------#

correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

cost_history = np.empty(shape=[1],dtype=float)
saver = tf.train.Saver()



#-----------------run session--------------------#

session = tf.Session()
session.run(tf.global_variables_initializer())

for epoch in range(8):
    for b in range(total_batches):    
        offset = (b * batch_size) % (train_y.shape[0] - batch_size)
        batch_x = train_x[offset:(offset + batch_size), :]
        batch_y = train_y[offset:(offset + batch_size), :]
        _, c = session.run([optimizer, loss],feed_dict={X: batch_x, Y : batch_y})
        cost_history = np.append(cost_history,c)
    print("Epoch: ",epoch," Training Loss: ",c," Training Accuracy: ",\
            session.run(accuracy, feed_dict={X: train_x, Y: train_y}))

print("Testing Accuracy:", session.run(accuracy, feed_dict={X: test_x, Y: test_y}))

if 1==1:
    print ("Testing Accuracy: ", session.run(accuracy, feed_dict={X: test_x, Y: test_y}),'\n')
    pred_y = session.run(tf.argmax(y_ ,1),feed_dict={X: test_x})
    cm = confusion_matrix(np.argmax(test_y ,1),pred_y)
    print (cm, '\n')
    plt.imshow(cm)
    plt.title('Confusion Matrix')
    plt.rcParams['image.cmap'] = 'afmhot'
    plt.colorbar()
    tick_marks = np.arange(len(['Wal', 'Std', 'Jog', 'Sit', 'Bik', 'Wlu', 'Wld']))
    plt.xticks(tick_marks, ['Wal', 'Std', 'Jog', 'Sit', 'Bik', 'Wlu', 'Wld'])
    plt.yticks(tick_marks, ['Wal', 'Std', 'Jog', 'Sit', 'Bik', 'Wlu', 'Wld'])

    fmt = '.2f'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.figure()
    plt.show()