LSTM

ysz12316 2021-05-26 10:34:44
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.examples.tutorials.mnist import input_data
tf.reset_default_graph()

mnist = input_data.read_data_sets('./data/fashion-mnist_data', one_hot=True)
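# Note (an aside, not from the original post): tensorflow.examples.tutorials.mnist only
# ships with TF 1.x. Under TF 2.x the same data can be loaded through Keras, e.g.:
#     (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# The images would then need flattening to 784 floats and the labels one-hot encoding
# to match the placeholders below.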

lr = 0.001
keep_prob = tf.placeholder(tf.float32, [])

# Train and test use different batch sizes, so batch_size is fed in through a placeholder
batch_size = tf.placeholder(tf.int32, [])

# Each time step takes a 28-dimensional input, i.e. one row of 28 pixels per step
input_size = 28

# Number of units in each hidden layer
hidden_size = 256

# Sequence length, i.e. how many rows are fed in before each prediction
timestep_size = 28

# Number of LSTM layers
layer_num = 2

# Number of output classes
classes_num = 10

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, classes_num])

'''
Key steps in implementing an RNN/LSTM:
1. The RNN input has shape [batch_size, timestep_size, input_size].
2. Define a basic LSTM cell; only hidden_size needs to be given, the input
   dimension of X is matched automatically:
   lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)
3. Add a dropout layer; usually only output_keep_prob is set:
   lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)
4. Stack layers with tf.nn.rnn_cell.MultiRNNCell() to get a multi-layer LSTM.
5. Initialize the state to all zeros.
6. Run the assembled network with tf.nn.dynamic_rnn().
'''


# def Multi_LSTM(hidden_size, keep_prob, batch_size, x):
#     X = tf.reshape(x, [-1, 28, 28])
#     # needs: from tensorflow.contrib import rnn (TF 1.x only)
#     lstm_cell = rnn.LSTMCell(hidden_size, reuse=tf.get_variable_scope().reuse)
#     lstm_cell = rnn.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)
#     # BUG: this reuses a single cell object for every layer; TF >= 1.2 needs a
#     # fresh cell per layer -- see the corrected sketch below
#     m_lstm = rnn.MultiRNNCell([lstm_cell for _ in range(layer_num)], state_is_tuple=True)
#     init_state = m_lstm.zero_state(batch_size, dtype=tf.float32)
#     outputs, state = tf.nn.dynamic_rnn(m_lstm, inputs=X, initial_state=init_state, time_major=False)
#     h_state = outputs[:, -1, :]
#     return h_state

# h_state = Multi_LSTM(hidden_size, keep_prob, batch_size, x)
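
# A corrected multi-layer variant -- a sketch, not from the original post; the names
# Multi_LSTM_fixed and make_cell are illustrative. Each layer gets its own cell
# object, which MultiRNNCell requires in TF >= 1.2:
# def Multi_LSTM_fixed(hidden_size, keep_prob, batch_size, X):
#     def make_cell():
#         cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
#         return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)
#     m_lstm = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(layer_num)], state_is_tuple=True)
#     init_state = m_lstm.zero_state(batch_size, dtype=tf.float32)
#     outputs, state = tf.nn.dynamic_rnn(m_lstm, inputs=X, initial_state=init_state, time_major=False)
#     return outputs[:, -1, :]  # last-step hidden state, shape [batch_size, hidden_size]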
X = tf.reshape(x, [-1, 28, 28])  # [batch, 784] -> [batch, timestep_size, input_size]
with tf.name_scope('weight'), tf.variable_scope('weight', reuse=tf.AUTO_REUSE):
    W = tf.Variable(tf.truncated_normal([hidden_size, classes_num], stddev=0.1), dtype=tf.float32)
    tf.summary.histogram('weight', W)

with tf.name_scope('bias'), tf.variable_scope('bias', reuse=tf.AUTO_REUSE):
    bias = tf.Variable(tf.constant(0.1, shape=[classes_num]), dtype=tf.float32)
    tf.summary.histogram('bias', bias)

# Prediction function using a stacked (multi-layer) LSTM
# def Multi_LSTM():
#     cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(hidden_size) for _ in range(layer_num)])
#     init_state = cell.zero_state(batch_size, dtype=tf.float32)
#     outputs, state = tf.nn.dynamic_rnn(cell, inputs=X, initial_state=init_state, time_major=False)
#     h_state = outputs[:, -1, :]
#     # NOTE: applying softmax here and again in softmax_cross_entropy_with_logits_v2
#     # would double-apply it; return the logits instead (as Single_LSTM does below)
#     y_pre = tf.nn.softmax(tf.matmul(h_state, W) + bias)
#     return y_pre


# Prediction function using a single-layer LSTM
def Single_LSTM(hidden_size, batch_size, X):
    with tf.name_scope('cell_lstm'), tf.variable_scope('cell_lstm', reuse=tf.AUTO_REUSE):
        with tf.name_scope('cell'), tf.variable_scope('cell', reuse=tf.AUTO_REUSE):
            cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, reuse=tf.get_variable_scope().reuse)
            # wrap with dropout so the keep_prob placeholder fed below is actually used
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)

        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        outputs, state = tf.nn.dynamic_rnn(cell, inputs=X, initial_state=init_state, time_major=False)
        with tf.name_scope('out-state'), tf.variable_scope('out-state', reuse=tf.AUTO_REUSE):
            h_state = outputs[:, -1, :]  # hidden state at the last time step
            tf.summary.histogram('h_state', h_state)
        # return unscaled logits; softmax is applied by the cross-entropy op below
        y_pre = tf.matmul(h_state, W) + bias
    return y_pre

# y_pre = Multi_LSTM()
with tf.name_scope('y_pre'), tf.variable_scope('y_pre', reuse=tf.AUTO_REUSE):
    y_pre = Single_LSTM(hidden_size, batch_size, X)  # unscaled logits
    tf.summary.histogram('y_pre', y_pre)
# cross_entropy = -tf.reduce_mean(y * tf.log(y_pre))

with tf.name_scope('cross_entropy'), tf.variable_scope('cross_entropy', reuse=tf.AUTO_REUSE):
    # softmax_cross_entropy_with_logits_v2 expects unscaled logits, not softmax output
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pre, labels=y))
    tf.summary.scalar('cross_entropy', cross_entropy)

train_op = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(y, 1))
with tf.name_scope('accuracy_train'), tf.variable_scope('accuracy_train', reuse=tf.AUTO_REUSE):
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    tf.summary.scalar('accuracy_train', accuracy)

with tf.name_scope('accuracy_test'), tf.variable_scope('accuracy_test', reuse=tf.AUTO_REUSE):
    test_accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    tf.summary.scalar('accuracy_test', test_accuracy)

# saver = tf.train.Saver()
init = tf.global_variables_initializer()
train_log_dir = 'log/train/'
test_log_dir = 'log/test/'
sess = tf.InteractiveSession()
merged = tf.summary.merge_all()
writer_train = tf.summary.FileWriter(train_log_dir, sess.graph)
writer_test = tf.summary.FileWriter(test_log_dir)
# writer = tf.summary.FileWriter("log/", sess.graph)
sess.run(init)

for i in range(2000):
    batch = mnist.train.next_batch(128)
    if (i + 1) % 100 == 0:
        train_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0, batch_size: 128})
        # epochs_completed counts finished passes over the training set
        print("Epoch %d, step %d, training accuracy: %g" % (mnist.train.epochs_completed, (i + 1), train_accuracy))
        print('Test Accuracy :', sess.run(test_accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels,
                                                                    keep_prob: 1.0, batch_size: mnist.test.images.shape[0]}))
        result_train = sess.run(merged, feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0, batch_size: 128})
        result_test = sess.run(merged, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0,
                                                  batch_size: mnist.test.images.shape[0]})
        writer_train.add_summary(result_train, i)
        writer_test.add_summary(result_test, i)
    sess.run(train_op, feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5, batch_size: 128})
# saver.save(sess, 'MNIST_DATA/save')
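
# To view the logged loss/accuracy curves and histograms, run the standard
# TensorBoard CLI and compare the train/ and test/ runs:
#     tensorboard --logdir log/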