How do I save the h_fc1 feature matrix to disk?

Time: 09-26

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import scipy.io as sio
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import xlrd
from openpyxl import Workbook

# Start the session in interactive mode
# Without an interactive session, the whole computation graph must be built
# before the session can be started
# sess = tf.InteractiveSession()

data = sio.loadmat('ballfault_DE.mat')
sensorlenth = 2048 * 36
condition = 4        # number of operating conditions
classification = 10  # number of categories
L = 2048             # input length
evfisam_num = int(sensorlenth / L)

evfitrain_num = int(evfisam_num * 3 / 4)    # training samples per operating condition
evfitest_num = evfisam_num - evfitrain_num  # test samples per operating condition
div = 1
C = 4
al = 512
evdoctrain_num = condition * (evfitrain_num - 1) * C
evdoctest_num = condition * evfitest_num  # conditions x test samples per condition
batch_num = int(evdoctrain_num / div)
train_num = evdoctrain_num * classification
test_num = evfitest_num * condition * classification
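# Quick sanity check of the sample bookkeeping implied by the values above:
#   evfisam_num    = 2048 * 36 / 2048  = 36 segments per sensor channel
#   evfitrain_num  = 36 * 3 / 4        = 27, so evfitest_num = 9
#   evdoctrain_num = 4 * (27 - 1) * 4  = 416 training rows per class
#   train_num      = 416 * 10          = 4160; test_num = 9 * 4 * 10 = 360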

cnn_train = np.zeros((train_num, L))
cnn_test = np.zeros((test_num, L))
sensor_1 = data['ballfault']
for i in range(classification * condition):
    sensor = sensor_1[0:sensorlenth, i]

    cnn_train_1 = sensor[0:L * evfitrain_num]

    for j in range(C):  # augment the training data C times
        cnn_train[(i * C + j) * (evfitrain_num - 1):(i * C + j + 1) * (evfitrain_num - 1), :] = \
            cnn_train_1[j * al:(evfitrain_num - 1) * L + j * al].reshape(evfitrain_num - 1, L)

    cnn_test_1 = sensor[L * evfitrain_num:evfisam_num * L]
    cnn_test[i * evfitest_num:(i + 1) * evfitest_num, :] = \
        cnn_test_1[0:evfitest_num * L].reshape(evfitest_num, L)
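# Each pass j starts the window al = 512 points later, so the C = 4 copies of a
# class are overlapping, time-shifted views of the same signal; taking only
# evfitrain_num - 1 segments keeps even the largest shift inside cnn_train_1.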

lable_train = np.zeros(train_num)
lable_test = np.zeros(test_num)
for num_dir in range(0, classification):
    lable_train[num_dir * evdoctrain_num:(num_dir + 1) * evdoctrain_num] = (num_dir + 1) * np.ones(evdoctrain_num)
    lable_test[num_dir * evdoctest_num:(num_dir + 1) * evdoctest_num] = (num_dir + 1) * np.ones(evdoctest_num)

expect_y = np.zeros((train_num, classification))  # one-hot training labels
m = 0
for l in lable_train:
    expect_y[m, int(l - 1)] = 1
    m += 1

test_expect_y = np.zeros((test_num, classification))  # one-hot test labels
m = 0
for l in lable_test:
    test_expect_y[m, int(l - 1)] = 1
    m += 1

merge = np.append(cnn_train, expect_y, axis=1)
np.random.shuffle(merge)  # tf.random_shuffle(a)
cnn_train = merge[:, 0:L]
expect_y = merge[:, L:L + classification]
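# Stacking the one-hot labels onto the samples before shuffling keeps each row's
# signal and label together; the two slices above then separate them again.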

kernel_length1 = 16
kernel_length2 = 10
kernel_length3 = 8
kernel_length4 = 6
kernel_length5 = 16
kernel_length6 = 10
kernel_length7 = 8
kernel_length8 = 6
# L_1 = int(L - kernel_length1 + 1) / 4
# L_2 = int(L_1 - kernel_length2 + 1) / 4
# L_3 = int(L_2 - kernel_length3 + 1) / 4
B = np.power(2, 8)
L_end = int(L / B)


kernel_num_1 = 8
kernel_num_2 = 16
kernel_num_3 = 9
kernel_num_4 = 12
kernel_num_5 = 8
kernel_num_6 = 16
kernel_num_7 = 9
kernel_num_8 = 12

out_num = 100

"" "build calculation chart "" "
# through a placeholder for the input image and the target output type create node
# shape parameter is optional, it tensorflow can automatically capture dimension error caused by inconsistent
Initial_input=tf. Placeholder (" float ", shape=[None, L]) # raw input
Initial_y=tf. Placeholder (" float ", shape=[None, classification]) # target

# To avoid repeating the initialization code while building the model,
# we define two helper functions
def weight_variable(shape):
    # truncated normal distribution; stddev is its standard deviation
    initial = tf.truncated_normal(shape=shape, stddev=0.05)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution and max pooling: stride 1, zero ('SAME') padding
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')
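# With padding='SAME' and stride 1 the convolutions preserve the sequence length,
# while each 1x2 max-pool halves it; after 8 conv + pool blocks the length is
# L / 2**8 = L_end (8 points for L = 2048), which is what B = 2**8 encodes above.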

"" "the first layer of convolution "" "
# by a convolution and a maximum of pooling, filter 1 x16 calculate 32 characteristics, because using 32 filter for convolution
# convolution weights of tensor shape is [1, 16, 1, 32], 1 is the number of input channels, 32 is output channel number
, kernel_length1 W_conv1=weight_variable ([1, 1, kernel_num_1])
Each output channel # has an offset
B_conv1=bias_variable ([kernel_num_1])
# the using convolution, must transform the input into 4 d vector, 2, 3 d pictures of wide, high
# the last said the picture color channel (channel number 1, because the gray image is RGB image channel number for the 3)
Process_image=tf. Reshape (initial_input, [1, 1, L, 1))
# the first layer of the convolution results, using Relu as activation function
H_conv1=tf. Nn. Relu (conv2d (process_image W_conv1) + b_conv1)
# after the convolution pooling the results of the first layer
H_pool1=max_pool_2x2 (h_conv1)

"" "the second convolution "" "
W_conv2=weight_variable ([1, kernel_length2, kernel_num_1 kernel_num_2])
B_conv2=bias_variable ([kernel_num_2])
H_conv2=tf. Nn. Relu (conv2d (h_pool1 W_conv2) + b_conv2)
H_pool2=max_pool_2x2 (h_conv2)

"" "the third layer convolution "" "
W_conv3=weight_variable ([1, kernel_length3, kernel_num_2 kernel_num_3])
B_conv3=bias_variable ([kernel_num_3])
H_conv3=tf. Nn. Relu (conv2d (h_pool2 W_conv3) + b_conv3)
H_pool3=max_pool_2x2 (h_conv3)

"" "the fourth floor convolution "" "
W_conv4=weight_variable ([1, kernel_length4, kernel_num_3 kernel_num_4])
B_conv4=bias_variable ([kernel_num_4])
H_conv4=tf. Nn. Relu (conv2d (h_pool3 W_conv4) + b_conv4)
H_pool4=max_pool_2x2 (h_conv4)

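The listing is cut off before h_fc1 is ever defined. Assuming the remaining four layers mirror the first four and feed a fully connected layer of out_num units (which is what L_end = L / 2**8 and out_num = 100 suggest), a minimal sketch of building h_fc1 and saving its values looks like this; the layer definitions, h_fc1_value, and the output filenames are illustrative, not the poster's exact code:

"""Fifth to eighth convolutional layers (assumed to mirror layers one to four)"""
W_conv5 = weight_variable([1, kernel_length5, kernel_num_4, kernel_num_5])
b_conv5 = bias_variable([kernel_num_5])
h_pool5 = max_pool_2x2(tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5))
W_conv6 = weight_variable([1, kernel_length6, kernel_num_5, kernel_num_6])
b_conv6 = bias_variable([kernel_num_6])
h_pool6 = max_pool_2x2(tf.nn.relu(conv2d(h_pool5, W_conv6) + b_conv6))
W_conv7 = weight_variable([1, kernel_length7, kernel_num_6, kernel_num_7])
b_conv7 = bias_variable([kernel_num_7])
h_pool7 = max_pool_2x2(tf.nn.relu(conv2d(h_pool6, W_conv7) + b_conv7))
W_conv8 = weight_variable([1, kernel_length8, kernel_num_7, kernel_num_8])
b_conv8 = bias_variable([kernel_num_8])
h_pool8 = max_pool_2x2(tf.nn.relu(conv2d(h_pool7, W_conv8) + b_conv8))

"""Fully connected layer that produces h_fc1"""
W_fc1 = weight_variable([L_end * kernel_num_8, out_num])
b_fc1 = bias_variable([out_num])
h_pool8_flat = tf.reshape(h_pool8, [-1, L_end * kernel_num_8])  # [batch, 8 * 12]
h_fc1 = tf.nn.relu(tf.matmul(h_pool8_flat, W_fc1) + b_fc1)      # [batch, out_num]

# To save the h_fc1 feature matrix: evaluate the tensor to get a numpy array,
# then write it out with scipy.io (already imported as sio) or plain numpy.
# In the full script this would run after training, inside the existing session.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
h_fc1_value = sess.run(h_fc1, feed_dict={initial_input: cnn_train})
sio.savemat('h_fc1_features.mat', {'h_fc1': h_fc1_value})  # MATLAB-readable .mat
np.save('h_fc1_features.npy', h_fc1_value)                 # or NumPy binary format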