LSTM

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.examples.tutorials.mnist import input_data
tf.reset_default_graph()

mnist = input_data.read_data_sets('./data/fashion-mnist_data', one_hot=True)

lr = 0.001
keep_prob = tf.placeholder(tf.float32, [])

# Training and testing use different batch sizes, so batch_size is fed in
# through a placeholder instead of being fixed in the graph.
batch_size = tf.placeholder(tf.int32, [])
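# For illustration (the values here are assumptions, not from the original
# post), the same graph can later be run with different sizes, e.g.
#   sess.run(train_op, feed_dict={..., batch_size: 128, keep_prob: 0.5})
#   sess.run(test_accuracy, feed_dict={..., batch_size: 10000, keep_prob: 1.0})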

# Feature dimension of the input at each time step: one 28-pixel row of the
# image is fed in per step.
input_size = 28

# Number of hidden units in each LSTM layer
hidden_size = 256

# Sequence length: the number of rows that must be fed in for one prediction
timestep_size = 28
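# Putting these together: each 784-pixel image is reshaped to
# [timestep_size, input_size] = [28, 28], so the LSTM reads an image as a
# 28-step sequence of 28-pixel rows.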

# Number of stacked LSTM layers
layer_num = 2

# Number of output classes
classes_num = 10

X = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, classes_num])

"'
Key steps: to realize RNN/LSTM
1, the input shape of RNN=[batch_size timestep_size, input_size],
2, define a LSTM_cell basic unit, is explained in this need only hidden_size, it will automatically matching the input dimension of X,
Lstm_cell=tf. Nn. Rnn_cell. BasicLSTMCell (num_units=hidden_size forget_bias=1.0, state_is_tuple=True)
3, add dropout layer, set only commonly out_keep_prob
Lstm_cell=tf. Nn. Rnn_cell. DropoutWrapper (cell_lstm_cell input_keep_prob=1.0, output_keep_prob=keep_prob)
. 4, call tf. Nn. Rnn_cell MultiRNNCell () multilayer LSTM
5, initial state to a state of full 0
6, with tf. Nn. Dynamic_rnn () to build a good network to run
"'


# def Multi_LSTM(hidden_size, keep_prob, batch_size, X):
#     X = tf.reshape(X, [-1, 28, 28])
#     lstm_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, reuse=tf.get_variable_scope().reuse)
#     lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)
#     m_lstm = tf.nn.rnn_cell.MultiRNNCell([lstm_cell for _ in range(layer_num)], state_is_tuple=True)
#     init_state = m_lstm.zero_state(batch_size, dtype=tf.float32)
#     outputs, state = tf.nn.dynamic_rnn(m_lstm, inputs=X, initial_state=init_state, time_major=False)
#     h_state = outputs[:, -1, :]  # hidden state at the last time step
#     return h_state

# h_state = Multi_LSTM(hidden_size, keep_prob, batch_size, X)
X = tf.reshape(X, [-1, 28, 28])
with tf.name_scope('weight'), tf.variable_scope('weight', reuse=tf.AUTO_REUSE):
    W = tf.Variable(tf.truncated_normal([hidden_size, classes_num], stddev=0.1), dtype=tf.float32)
    tf.summary.histogram('weight', W)

with tf.name_scope('bias'), tf.variable_scope('bias', reuse=tf.AUTO_REUSE):
    bias = tf.Variable(tf.constant(0.1, shape=[classes_num]), dtype=tf.float32)
    tf.summary.histogram('bias', bias)

# Multi-layer LSTM prediction function
# def Multi_LSTM():
#     cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(hidden_size) for _ in range(layer_num)])
#     init_state = cell.zero_state(batch_size, dtype=tf.float32)
#     outputs, state = tf.nn.dynamic_rnn(cell, inputs=X, initial_state=init_state, time_major=False)
#     h_state = outputs[:, -1, :]
#     y_pre = tf.nn.softmax(tf.matmul(h_state, W) + bias)
#     return y_pre


# Single-layer LSTM prediction function. It returns logits: the softmax is
# applied inside the cross-entropy op below, so applying tf.nn.softmax here
# as well would squash the logits twice.
def Single_LSTM(hidden_size, batch_size, X):
    with tf.name_scope('cell_lstm'), tf.variable_scope('cell_lstm', reuse=tf.AUTO_REUSE):
        with tf.name_scope('cell'), tf.variable_scope('cell', reuse=tf.AUTO_REUSE):
            cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, reuse=tf.get_variable_scope().reuse)

    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    outputs, state = tf.nn.dynamic_rnn(cell, inputs=X, initial_state=init_state, time_major=False)
    with tf.name_scope('out_state'), tf.variable_scope('out_state', reuse=tf.AUTO_REUSE):
        h_state = outputs[:, -1, :]  # hidden state at the last time step
        tf.summary.histogram('h_state', h_state)
    y_pre = tf.matmul(h_state, W) + bias
    return y_pre

# y_pre = Multi_LSTM()
with tf.name_scope('y_pre'), tf.variable_scope('y_pre', reuse=tf.AUTO_REUSE):
    y_pre = Single_LSTM(hidden_size, batch_size, X)
    tf.summary.histogram('y_pre', y_pre)
# cross_entropy = -tf.reduce_mean(y * tf.log(y_pre))  # only valid on softmax probabilities

with tf.name_scope('cross_entropy'), tf.variable_scope('cross_entropy', reuse=tf.AUTO_REUSE):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pre, labels=y))
    tf.summary.scalar('cross_entropy', cross_entropy)

train_op = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(y, 1))
with tf.name_scope('accuracy_train'), tf.variable_scope('accuracy_train', reuse=tf.AUTO_REUSE):
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    tf.summary.scalar('accuracy_train', accuracy)

# Same computation as the training accuracy; only the summary tag differs,
# so train and test runs can be plotted as separate TensorBoard curves.
with tf.name_scope('accuracy_test'), tf.variable_scope('accuracy_test', reuse=tf.AUTO_REUSE):
    test_accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    tf.summary.scalar('accuracy_test', test_accuracy)

# saver = tf.train.Saver()
init = tf.global_variables_initializer()
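
The listing ends before the session is run. Below is a minimal training-loop sketch to complete it; it is not from the original post, and the batch size of 128, the 2000 iterations, the keep probability of 0.5, and the ./logs summary directory are illustrative assumptions.

merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./logs', sess.graph)  # assumed log directory
    for step in range(2000):  # assumed iteration count
        xs, ys = mnist.train.next_batch(128)  # assumed training batch size
        # X was overwritten by the reshape above, so feed [batch, 28, 28] data
        _, summary = sess.run(
            [train_op, merged],
            feed_dict={X: xs.reshape(-1, 28, 28), y: ys,
                       batch_size: 128, keep_prob: 0.5})
        writer.add_summary(summary, step)
        if step % 200 == 0:
            acc = sess.run(accuracy,
                           feed_dict={X: xs.reshape(-1, 28, 28), y: ys,
                                      batch_size: 128, keep_prob: 1.0})
            print('step %d, train accuracy %g' % (step, acc))
    # Evaluate on the full test set with dropout disabled
    print('test accuracy %g' % sess.run(
        test_accuracy,
        feed_dict={X: mnist.test.images.reshape(-1, 28, 28),
                   y: mnist.test.labels,
                   batch_size: mnist.test.images.shape[0],
                   keep_prob: 1.0}))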