Python 3.x:如何使用 TensorFlow(Python 3)对 LSTM 模型进行情绪预测?

标签:python-3.x, testing, tensorflow, lstm, rnn

在变量中加载了一些正文件和负文件

# Load the vocabulary (as UTF-8 strings) and its pretrained word vectors from disk.
wordsList = [w.decode('UTF-8') for w in np.load('training_data/wordsList.npy').tolist()]
wordVectors = np.load('training_data/wordVectors.npy')
培训和测试方法

# Build the ids matrix: one row per review file; each entry is the vocabulary
# index of a word (UNKNOWN_INDEX for out-of-vocabulary words), and unused
# trailing slots stay zero. Positive files fill the first rows, negative next.
UNKNOWN_INDEX = 399999  # index reserved for unknown words

# O(1) word -> index lookup. The original called wordsList.index(word) for
# every word, which rescans the whole vocabulary each time (quadratic overall).
wordIndexes = {word: idx for idx, word in enumerate(wordsList)}

def _encode_file(path, row):
    """Read the first line of `path`, clean it, and write word ids into ids[row].

    At most maxSeqLength words are written; the rest of the line is ignored.
    """
    with open(path, "r") as f:
        words = cleanSentences(f.readline()).split()
    for col, word in enumerate(words[:maxSeqLength]):
        ids[row][col] = wordIndexes.get(word, UNKNOWN_INDEX)

ids = np.zeros((numFiles, maxSeqLength), dtype='int32')
fileCounter = 0
# NOTE(review): this section is pure NumPy + file I/O, so the original
# tf.device('/gpu:0') wrapper had no effect and was removed.
for pf in positiveFiles:
    _encode_file(pf, fileCounter)
    fileCounter += 1
for nf in negativeFiles:
    _encode_file(nf, fileCounter)
    fileCounter += 1
# Pass into embedding function and see if it evaluates.

np.save('idsMatrix', ids)

batchSize = 24

请帮助。

为输入和输出张量命名,之后即可从还原的图中按名称检索这些张量进行预测;下面给出实现预测所需的修改和附加代码。

# Rebuild the graph structure from the saved .meta file, then restore the
# latest checkpoint's weights into the current session.
# NOTE(review): 'models/pretrained....' is a placeholder — substitute the
# actual .meta filename saved during training.
new_saver = tf.train.import_meta_graph('models/pretrained....')
new_saver.restore(sess, tf.train.latest_checkpoint('models/./')) 
还原代码:此处应填入 model.meta 文件与 model 检查点目录的相对/绝对路径。

...
# Name the input placeholder so it can be retrieved by name from a restored graph.
input_data = tf.placeholder(tf.int32, [batchSize, maxSeqLength], name='inputs')
...
prediction = (tf.matmul(last, weight) + bias)
# you may use softmax if you want probabilities for prediction, but not for calculating the loss
# prediction = tf.nn.softmax(prediction)
# Name the output tensor so it can be fetched by name after restoring.
prediction = tf.identity(prediction, name='prediction')
...
with tf.device('/gpu:0'):
    for i in range(iterations):
        nextBatch, nextBatchLabels = getTrainBatch()
        # BUG FIX: the original line was missing the closing parenthesis,
        # which is a SyntaxError.
        sess.run(optimizer, {input_data: nextBatch, labels: nextBatchLabels})
    saver.save(sess, 'model')

为输入和输出张量命名,之后即可从还原的图中按名称检索这些张量进行预测;下面给出实现预测所需的修改和附加代码。

# Rebuild the graph structure from the saved .meta file, then restore the
# latest checkpoint's weights into the current session.
# NOTE(review): 'models/pretrained....' is a placeholder — substitute the
# actual .meta filename saved during training.
new_saver = tf.train.import_meta_graph('models/pretrained....')
new_saver.restore(sess, tf.train.latest_checkpoint('models/./')) 
还原代码:此处应填入 model.meta 文件与 model 检查点目录的相对/绝对路径。

...
# Name the input placeholder so it can be retrieved by name from a restored graph.
input_data = tf.placeholder(tf.int32, [batchSize, maxSeqLength], name='inputs')
...
prediction = (tf.matmul(last, weight) + bias)
# you may use softmax if you want probabilities for prediction, but not for calculating the loss
# prediction = tf.nn.softmax(prediction)
# Name the output tensor so it can be fetched by name after restoring.
prediction = tf.identity(prediction, name='prediction')
...
with tf.device('/gpu:0'):
    for i in range(iterations):
        nextBatch, nextBatchLabels = getTrainBatch()
        # BUG FIX: the original line was missing the closing parenthesis,
        # which is a SyntaxError.
        sess.run(optimizer, {input_data: nextBatch, labels: nextBatchLabels})
    saver.save(sess, 'model')