RNN (learning)

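The script below is a complete TensorFlow 1.x example (run through the `tf.compat.v1` shim): it stacks three GRU cells into a multi-layer RNN, feeds each MNIST digit in as a 28-step sequence of pixel rows, and trains with RMSProp while logging the loss and error rate for TensorBoard.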

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()

def initialize_weight_bias(in_size, out_size):
    weight = tf.truncated_normal(shape=(in_size, out_size), stddev=0.01, mean=0.0)
    bias = tf.constant(0.1, shape=[out_size])
    return tf.Variable(weight), tf.Variable(bias)


def model(data, target, dropout, num_hidden=200, num_layers=3):
    # build a stacked (multi-layer) GRU network
    cells = list()
    for _ in range(num_layers):
        cell = tf.nn.rnn_cell.GRUCell(num_units=num_hidden)
        cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=1.0 - dropout)
        cells.append(cell)
    network = tf.nn.rnn_cell.MultiRNNCell(cells=cells)
    outputs, last_state = tf.nn.dynamic_rnn(cell=network, inputs=data, dtype=tf.float32)
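    # with the default time_major=False, `outputs` has shape
    # (batch, time, hidden) and `last_state` holds each layer's final state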

    # get the output of the last time step
    outputs = tf.transpose(outputs, (1, 0, 2))  # -> (time, batch, hidden)
    last_output = tf.gather(outputs, int(outputs.get_shape()[0]) - 1)
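    # note: an equivalent, arguably clearer way to grab the last time step,
    # working directly on the original (batch, time, hidden) layout, is:
    #     last_output = outputs[:, -1, :]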

    # add a softmax (output) layer
    out_size = int(target.get_shape()[1])
    weight, bias = initialize_weight_bias(in_size=num_hidden, out_size=out_size)
    logits = tf.add(tf.matmul(last_output, weight), bias)

    return logits


def main():
    # define some parameters
    default_epochs = 10
    default_batch_size = 64
    default_dropout = 0.5
    test_freq = 150  # test every 150 batches
    logs_path = 'data/log'

    # get the train and test data
    mnist_data = input_data.read_data_sets('data/mnist', one_hot=True)
    total_steps = int(mnist_data.train.num_examples / default_batch_size)
    total_test_steps = int(mnist_data.test.num_examples / default_batch_size)
    print('number of training examples: %d' % mnist_data.train.num_examples)  # 55000
    print('number of test examples: %d' % mnist_data.test.num_examples)       # 10000
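    # each 784-pixel MNIST vector is reshaped to 28x28 below, so the RNN
    # sees one image as 28 time steps with 28 features per step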

    # fit the RNN model
    input_x = tf.placeholder(tf.float32, shape=(None, 28, 28))
    input_y = tf.placeholder(tf.float32, shape=(None, 10))
    dropout = tf.placeholder(tf.float32)
    input_logits = model(input_x, input_y, dropout)

    # define loss, optimizer, and error rate
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=input_logits, labels=input_y))
    train_op = tf.train.RMSPropOptimizer(0.001).minimize(loss)
    input_prob = tf.nn.softmax(input_logits)
    error_count = tf.not_equal(tf.argmax(input_prob, 1), tf.argmax(input_y, 1))
    error_rate_op = tf.reduce_mean(tf.cast(error_count, tf.float32))
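    # error_rate_op averages the boolean mismatches, i.e. the fraction of
    # misclassified samples in a batch (1.0 - accuracy)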

# add the summary
Tf. The summary. Scalar (' error_rate 'error_rate_op)
Tf. The summary. Scalar (' loss 'loss)
Merge_summary_op=tf. The summary. Merge_all ()

    # train and test
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter(logdir=logs_path, graph=tf.get_default_graph())
        # train
        for epoch in range(default_epochs):
            for step in range(total_steps):
                train_x, train_y = mnist_data.train.next_batch(default_batch_size)
                train_x = train_x.reshape(-1, 28, 28)
                feed_dict = {input_x: train_x,
                             input_y: train_y,
                             dropout: default_dropout}
                _, summary = session.run([train_op, merge_summary_op], feed_dict=feed_dict)
                # write logs
                summary_writer.add_summary(summary, global_step=epoch * total_steps + step)

                # test every test_freq batches
                if step > 0 and step % test_freq == 0:
                    avg_error = 0
                    for test_step in range(total_test_steps):
                        test_x, test_y = mnist_data.test.next_batch(default_batch_size)
                        test_x = test_x.reshape(-1, 28, 28)
                        feed_dict = {input_x: test_x,
                                     input_y: test_y,
                                     dropout: 0}  # keep_prob = 1.0: dropout disabled for evaluation
                        test_error = session.run(error_rate_op, feed_dict=feed_dict)
                        avg_error += test_error / total_test_steps
                    print('epoch: %d, step: %d, avg_test_error: %.4f' % (epoch, step, avg_error))

if __name__ == '__main__':
    main()
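To watch training, run the script and point TensorBoard at the log directory it writes ('data/log'); the 'loss' and 'error_rate' scalars logged above appear there:

    tensorboard --logdir=data/log

One caveat: the tensorflow.examples.tutorials.mnist module imported at the top ships with TensorFlow 1.x but is not included in recent 2.x packages, so under TF 2.x you may need to vendor that module or swap in another MNIST loader.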