https://blog.csdn.net/koibiki/article/details/83116596
@tf_export("nn.rnn_cell.BasicLSTMCell") class BasicLSTMCell(LayerRNNCell): input_depth = inputs_shape[1].value h_depth = self._num_units self._kernel = self.add_variable( _WEIGHTS_VARIABLE_NAME, shape=[input_depth + h_depth, 4 * self._num_units]) self._bias = self.add_variable( _BIAS_VARIABLE_NAME, shape=[4 * self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype)) 以上是tensorflow LSTMCell中的一段代码,里面可以看到lstm的kernel的shape为 [input_depth + h_depth, 4 * self._num_units] 即[输入深度 + 输出深度, 4 × cell个数],在第二维上的排列顺序为 i、c、f、o。 一个minist的例子验证 为了方便验证,设置 timesteps = 1,这样就可以不用考虑 和的对结果的影响 为了获得中间层的结果,将所有层输出均通过一个字典保存 from __future__ import print_function import tensorflow as tf from tensorflow.contrib import rnn # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data import numpy as np from keras.utils import np_utils import math mnist = input_data.read_data_sets("./data/", one_hot=True) # Training Parameters learning_rate = 0.001 training_steps = 10000 batch_size = 128 display_step = 200 # Network Parameters num_input = 784 # MNIST data input (img shape: 28*28) timesteps = 1 # timesteps num_hidden = 128 # hidden layer num of features num_classes = 10 # MNIST total classes (0-9 digits) # tf Graph input X = tf.placeholder("float", [None, timesteps, num_input]) Y = tf.placeholder("float", [None, num_classes]) # Define weights weights = { 'w': tf.Variable(tf.random_normal([num_hidden, num_classes])) } biases = { 'b': tf.Variable(tf.random_normal([num_classes])) } def RNN(x, weights, biases): x = tf.unstack(x, timesteps, 1) lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) logits = tf.matmul(outputs[-1], weights['w']) + biases['b'] out = {'lstm':lstm_cell, 'lstm_out':outputs, 'states':states, 'logits': logits} return out out = RNN(X, weights, biases) logits = out['logits'] prediction = tf.nn.softmax(logits) # Define loss and optimizer loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=Y)) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op) # Evaluate model (with test logits, for dropout to be disabled) correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initialize the variables (i.e. 
A MNIST example to verify this layout. To keep the verification simple, `timesteps` is set to 1, so the effect of $h_{t-1}$ and $c_{t-1}$ on the result can be ignored (both are zero at the first step). To get at the intermediate results, every layer's output is kept in a dictionary:

```python
from __future__ import print_function

import math

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("./data/", one_hot=True)

# Training parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Network parameters
num_input = 784    # MNIST data input (img shape: 28*28, flattened)
timesteps = 1      # timesteps
num_hidden = 128   # hidden layer num of features
num_classes = 10   # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Define weights
weights = {'w': tf.Variable(tf.random_normal([num_hidden, num_classes]))}
biases = {'b': tf.Variable(tf.random_normal([num_classes]))}


def RNN(x, weights, biases):
    x = tf.unstack(x, timesteps, 1)
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    logits = tf.matmul(outputs[-1], weights['w']) + biases['b']
    # Keep every intermediate tensor in a dict so it can be inspected later
    out = {'lstm': lstm_cell, 'lstm_out': outputs,
           'states': states, 'logits': logits}
    return out


out = RNN(X, weights, biases)
logits = out['logits']
prediction = tf.nn.softmax(logits)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
sess = tf.Session()

# Run the initializer
sess.run(init)

for step in range(1, training_steps + 1):
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    # Reshape data to (batch_size, timesteps, num_input)
    batch_x = batch_x.reshape((batch_size, timesteps, num_input))
    # Run optimization op (backprop)
    sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
    if step % display_step == 0 or step == 1:
        # Calculate batch loss and accuracy
        loss, acc = sess.run([loss_op, accuracy],
                             feed_dict={X: batch_x, Y: batch_y})
        print("Step " + str(step) + ", Minibatch Loss= " +
              "{:.4f}".format(loss) + ", Training Accuracy= " +
              "{:.3f}".format(acc))

print("Optimization Finished!")

# Take a single MNIST test image for the verification below
test_data = mnist.test.images[:1].reshape((-1, timesteps, num_input))
test_label = mnist.test.labels[:1]
```

After training finishes, run the LSTM cell's output tensor on the test data (a single image) to get the intermediate LSTM cell output:

```python
lstm_out = sess.run(out['lstm_out'], feed_dict={X: test_data})
```

which gives

```
[array([[ 0.0598105 , -0.14341736,  0.02396348, -0.08234564, -0.04004124, ...]], dtype=float32)]
```

Run the kernel and bias tensors to get the weights:

```python
lstm_kernel = sess.run(out['lstm'].weights[0])
lstm_bias = sess.run(out['lstm'].weights[1])
```

Split the weights into their gate blocks. Rows 0-783 of the kernel multiply the input $x_t$ and rows 784 onward multiply the hidden state $h_{t-1}$; the columns come in i, c, f, o order:

```python
lstm_k_i = lstm_kernel[:784, :128]
lstm_k_h_i = lstm_kernel[784:, :128]
lstm_k_c = lstm_kernel[:784, 128:128*2]
lstm_k_h_c = lstm_kernel[784:, 128:128*2]
lstm_k_f = lstm_kernel[:784, 128*2:128*3]
lstm_k_h_f = lstm_kernel[784:, 128*2:128*3]
lstm_k_o = lstm_kernel[:784, 128*3:]
lstm_k_h_o = lstm_kernel[784:, 128*3:]

lstm_b_i = lstm_bias[:128]
lstm_b_c = lstm_bias[128:128*2]
lstm_b_f = lstm_bias[128*2:128*3]
lstm_b_o = lstm_bias[128*3:]
```

Define a sigmoid function:

```python
def sigmoid(v):
    return 1 / (1 + math.exp(-v))
```

Now compute the output of the fourth LSTM unit by hand. Note that because timesteps = 1, the influence of $h_{t-1}$ and $c_{t-1}$ is not considered here (both are zero at the first step, which also makes the forget gate irrelevant):

```python
i = sigmoid(np.dot(lstm_k_i[:, 3], test_data[0, 0]) + lstm_b_i[3])
f = sigmoid(np.dot(lstm_k_f[:, 3], test_data[0, 0]) + lstm_b_f[3])
ct_ = math.tanh(np.dot(lstm_k_c[:, 3], test_data[0, 0]) + lstm_b_c[3])
ct = i * ct_ + f * 0   # c_{t-1} = 0, so the forget-gate term vanishes
o = sigmoid(np.dot(lstm_k_o[:, 3], test_data[0, 0]) + lstm_b_o[3])
ht = math.tanh(ct) * o
```

This yields

```
-0.08234565254093663
```

which matches the fourth element of the model's own test output (-0.08234564).
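As a further sanity check, the whole 128-dimensional output can be recomputed at once with NumPy rather than one unit at a time. This is a minimal sketch that reuses the `lstm_kernel`, `lstm_bias`, `test_data`, and `lstm_out` values obtained above and relies only on the i, c, f, o column ordering established earlier:

```python
# x is the single test image; h_{t-1} = c_{t-1} = 0 because timesteps = 1
x = test_data[0, 0]                             # shape (784,)
z = np.dot(x, lstm_kernel[:784]) + lstm_bias    # pre-activations, shape (512,)
z_i, z_c, z_f, z_o = np.split(z, 4)             # gate blocks in i, c, f, o order

sig = lambda v: 1.0 / (1.0 + np.exp(-v))
c_t = sig(z_i) * np.tanh(z_c)   # the f * c_{t-1} term vanishes at the first step
h_t = sig(z_o) * np.tanh(c_t)

print(np.allclose(h_t, lstm_out[0][0], atol=1e-5))   # expected: True
```

If the assumed gate ordering were wrong, the comparison against `lstm_out` would fail, so this one-liner check confirms the kernel layout for all 128 units, not just the fourth.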