Home > other > "'_TupleWrapper' object is not callable"
'_TupleWrapper' object is not callable

Time: 09-16

import tensorflow as tf
from tensorflow import keras

class my_vgg13(tf.keras.Model):
    """VGG13-style CNN for CIFAR-100.

    Input:  [b, 32, 32, 3] images.
    Output: [b, 100] raw logits (no softmax; use from_logits=True in the loss).
    """

    def __init__(self):
        super(my_vgg13, self).__init__()
        # BUG FIX: the original code ended every Conv2D assignment with a
        # trailing comma, e.g. `self.con1 = tf.keras.layers.Conv2D(...),`
        # which makes the attribute a 1-tuple. Keras auto-tracks it as a
        # `_TupleWrapper`, and calling it in call() raises
        # "'_TupleWrapper' object is not callable". The commas are removed.
        self.con1 = tf.keras.layers.Conv2D(filters=64, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.con2 = tf.keras.layers.Conv2D(filters=64, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.con3 = tf.keras.layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.con4 = tf.keras.layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.con5 = tf.keras.layers.Conv2D(filters=256, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.con6 = tf.keras.layers.Conv2D(filters=256, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.pool3 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.con7 = tf.keras.layers.Conv2D(filters=512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.con8 = tf.keras.layers.Conv2D(filters=512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.pool4 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.con9 = tf.keras.layers.Conv2D(filters=512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.con10 = tf.keras.layers.Conv2D(filters=512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu)
        self.pool5 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        # After 5 poolings a 32x32 input is 1x1x512; flatten it to [b, 512].
        self.flatten = tf.keras.layers.Reshape(target_shape=(1 * 1 * 512,))
        self.dens1 = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)
        self.dens2 = tf.keras.layers.Dense(units=128, activation=tf.nn.relu)
        self.dens3 = tf.keras.layers.Dense(units=100, activation=None)

    def call(self, inputs):
        """Forward pass: [b, 32, 32, 3] -> [b, 100] logits."""
        x = self.con1(inputs)  # [b, 32, 32, 3]  => [b, 32, 32, 64]
        x = self.con2(x)       # [b, 32, 32, 64] => [b, 32, 32, 64]
        x = self.pool1(x)      # [b, 32, 32, 64] => [b, 16, 16, 64]

        x = self.con3(x)
        x = self.con4(x)
        x = self.pool2(x)      # => [b, 8, 8, 128]

        x = self.con5(x)
        x = self.con6(x)
        x = self.pool3(x)      # => [b, 4, 4, 256]

        x = self.con7(x)
        x = self.con8(x)
        x = self.pool4(x)      # => [b, 2, 2, 512]

        x = self.con9(x)
        x = self.con10(x)
        x = self.pool5(x)      # => [b, 1, 1, 512]

        x = self.flatten(x)    # => [b, 512]
        x = self.dens1(x)
        x = self.dens2(x)
        x = self.dens3(x)      # => [b, 100]
        # out = tf.nn.softmax(x)  # intentionally omitted: loss uses from_logits=True
        return x

conv_net = my_vgg13()
# `lr` is a deprecated alias in TF2; use `learning_rate` instead.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

# CIFAR-100: x [50000, 32, 32, 3], y [50000, 1]; test split 10000 each.
(x, y), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()

# print(x.shape, y.shape, x_test.shape, y_test.shape)

def preprocess(x, y):
    """Scale images to float32 in [0, 1] and cast labels to int32."""
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y

# Labels come as [n, 1]; squeeze to [n] so they match the argmax predictions.
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)

# print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.map(preprocess).shuffle(1000).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(128)

# Peek at one batch to sanity-check shapes.
sample = iter(train_db)
sample_1 = next(sample)
# print(sample_1[0].shape)
# print(sample_1[1].shape)

def main():
    """Train the VGG13 net on CIFAR-100 and report test accuracy after each epoch."""
    for epoch in range(10):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 100] logits
                out = conv_net(x)
                y_onehot = tf.one_hot(y, depth=100)
                loss = tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True)
                loss = tf.reduce_mean(loss)
            grads = tape.gradient(loss, conv_net.trainable_variables)
            optimizer.apply_gradients(zip(grads, conv_net.trainable_variables))
            if step % 100 == 0:
                print(epoch, step, "loss:", float(loss))

        # Evaluate on the test set after every epoch.
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            out = conv_net(x)
            prob = tf.nn.softmax(out, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.equal(pred, y)
            correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))
            total_correct += int(correct)
            total_num += x.shape[0]
        acc = total_correct / total_num
        print(epoch, "test acc:", acc)


if __name__ == "__main__":
    main()









Run error:
Traceback (most recent call last):
TypeError: '_TupleWrapper' object is not callable

Cause: each layer assignment in `__init__` ended with a trailing comma (e.g.
`self.con1 = tf.keras.layers.Conv2D(...),`), which makes the attribute a
1-tuple. Keras tracks it as a `_TupleWrapper`, so calling it in `call()`
fails. Remove the trailing commas to fix the error.

  • Related