conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(x)
if batch_norm is True:
    conv = layers.BatchNormalization(axis=axis)(conv)
conv = layers.Activation('relu')(conv)
conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(conv)
if batch_norm is True:
    conv = layers.BatchNormalization(axis=axis)(conv)
conv = layers.Activation('relu')(conv)
if dropout > 0:
    conv = layers.Dropout(dropout)(conv)
shortcut = layers.Conv2D(size, kernel_size=(1, 1), padding='same')(x)
if batch_norm is True:
    shortcut = layers.BatchNormalization(axis=axis)(shortcut)
res_path = layers.add([shortcut, conv])
return res_path
This code is giving me the following error:
'>' not supported between instances of 'tuple' and 'int'
CodePudding user response:
Most likely you are providing a tuple for the dropout value, or for one of the other parameters (filter_size, size, axis). When I populate those parameters with suitable values, I can run your code. Alternatively, your input data x may contain some malformed data, but in order to debug further you should provide more context.
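For example, the check if dropout > 0: raises exactly this message when dropout is a tuple. A minimal sketch with hypothetical values (not taken from your code) to show the difference:

dropout = (0.5,)   # hypothetical: a tuple where a scalar was expected
if dropout > 0:    # TypeError: '>' not supported between instances of 'tuple' and 'int'
    pass

dropout = 0.5      # a plain float compares fine
if dropout > 0:
    pass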
CodePudding user response:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import backend as K

def expend_as(tensor, rep):
    return layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=3),
                         arguments={'repnum': rep})(tensor)

def double_conv_layer(x, filter_size, size, dropout, batch_norm=False):
    axis = 3
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(x)
    if batch_norm is True:
        conv = layers.BatchNormalization(axis=axis)(conv)
    conv = layers.Activation('relu')(conv)
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(conv)
    if batch_norm is True:
        conv = layers.BatchNormalization(axis=axis)(conv)
    conv = layers.Activation('relu')(conv)
    if dropout > 0:
        conv = layers.Dropout(dropout)(conv)
    shortcut = layers.Conv2D(size, kernel_size=(1, 1), padding='same')(x)
    if batch_norm is True:
        shortcut = layers.BatchNormalization(axis=axis)(shortcut)
    res_path = layers.add([shortcut, conv])
    return res_path

def gating_signal(input, out_size, batch_norm=False):
    """
    resize the down layer feature map into the same dimension as the up layer feature map
    using 1x1 conv
    :param input: down-dim feature map
    :param out_size: output channel number
    :return: the gating feature map with the same dimension of the up layer feature map
    """
    x = layers.Conv2D(out_size, (1, 1), padding='same')(input)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    return x

def attention_block(x, gating, inter_shape):
    shape_x = K.int_shape(x)
    shape_g = K.int_shape(gating)
    theta_x = layers.Conv2D(inter_shape, (2, 2), strides=(2, 2), padding='same')(x)  # 16
    shape_theta_x = K.int_shape(theta_x)
    phi_g = layers.Conv2D(inter_shape, (1, 1), padding='same')(gating)
    upsample_g = layers.Conv2DTranspose(inter_shape, (3, 3),
                                        strides=(shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2]),
                                        padding='same')(phi_g)  # 16
    concat_xg = layers.add([upsample_g, theta_x])
    act_xg = layers.Activation('relu')(concat_xg)
    psi = layers.Conv2D(1, (1, 1), padding='same')(act_xg)
    sigmoid_xg = layers.Activation('sigmoid')(psi)
    shape_sigmoid = K.int_shape(sigmoid_xg)
    upsample_psi = layers.UpSampling2D(size=(shape_x[1] // shape_sigmoid[1], shape_x[2] // shape_sigmoid[2]))(sigmoid_xg)  # 32
    upsample_psi = expend_as(upsample_psi, shape_x[3])
    y = layers.multiply([upsample_psi, x])
    result = layers.Conv2D(shape_x[3], (1, 1), padding='same')(y)
    result_bn = layers.BatchNormalization()(result)
    return result_bn

def Attention_ResUNet(dropout_rate=0.0, batch_norm=True):
    # input data
    # dimension of the image depth
    inputs = layers.Input((512, 512, 3), dtype=tf.float32)
    axis = 3
    # Downsampling layers
    # DownRes 1, double residual convolution + pooling
    conv_512 = double_conv_layer(inputs, 3, 64, dropout_rate, batch_norm)
    pool_256 = layers.MaxPooling2D(pool_size=(2, 2))(conv_512)
    # DownRes 2
    conv_256 = double_conv_layer(pool_256, 3, 2*64, dropout_rate, batch_norm)
    pool_128 = layers.MaxPooling2D(pool_size=(2, 2))(conv_256)
    # DownRes 3
    conv_128 = double_conv_layer(pool_128, 3, 4*64, dropout_rate, batch_norm)
    pool_64 = layers.MaxPooling2D(pool_size=(2, 2))(conv_128)
    # DownRes 4
    conv_64 = double_conv_layer(pool_64, 3, 8*64, dropout_rate, batch_norm)
    pool_32 = layers.MaxPooling2D(pool_size=(2, 2))(conv_64)
    # DownRes 5, convolution only
    conv_32 = double_conv_layer(pool_32, 3, 16*64, dropout_rate, batch_norm)
    # Upsampling layers
    # UpRes 6, attention gated concatenation + upsampling + double residual convolution
    gating_64 = gating_signal(conv_32, 8*64, batch_norm)
    att_64 = attention_block(conv_64, gating_64, 8*64)
    up_64 = layers.UpSampling2D(size=(2, 2), data_format="channels_last")(conv_32)
    up_64 = layers.concatenate([up_64, att_64], axis=axis)
    up_conv_64 = double_conv_layer(up_64, 3, 8*64, dropout_rate, batch_norm)
    # UpRes 7
    gating_128 = gating_signal(up_conv_64, 4*64, batch_norm)
    att_128 = attention_block(conv_128, gating_128, 4*64)
    up_128 = layers.UpSampling2D(size=(2, 2), data_format="channels_last")(up_conv_64)
    up_128 = layers.concatenate([up_128, att_128], axis=axis)
    up_conv_128 = double_conv_layer(up_128, 3, 4*64, dropout_rate, batch_norm)
    # UpRes 8
    gating_256 = gating_signal(up_conv_128, 2*64, batch_norm)
    att_256 = attention_block(conv_256, gating_256, 2*64)
    up_256 = layers.UpSampling2D(size=(2, 2), data_format="channels_last")(up_conv_128)
    up_256 = layers.concatenate([up_256, att_256], axis=axis)
    up_conv_256 = double_conv_layer(up_256, 3, 2*64, dropout_rate, batch_norm)
    # UpRes 9
    gating_512 = gating_signal(up_conv_128, 64, batch_norm)
    att_512 = attention_block(conv_512, gating_512, 64)
    up_512 = layers.UpSampling2D(size=(2, 2), data_format="channels_last")(up_conv_256)
    up_512 = layers.concatenate([up_512, att_512], axis=axis)
    up_conv_512 = double_conv_layer(up_512, 3, 64, dropout_rate, batch_norm)
    # 1*1 convolutional layers
    # valid padding
    # batch normalization
    # sigmoid nonlinear activation
    conv_final = layers.Conv2D(1, kernel_size=(1, 1))(up_conv_512)
    conv_final = layers.BatchNormalization(axis=axis)(conv_final)
    conv_final = layers.Activation('relu')(conv_final)
    # Model integration
    model = models.Model(inputs, conv_final, name="AttentionResUNet")
    return model

input_shape = (512, 512, 3)
model = Attention_ResUNet(dropout_rate=0.0, batch_norm=True)
model.summary()
This is the full code.