"""U-Net model builders for image segmentation, written with tf.keras.

This module provides:
  * initialize_model / initialize_model_v2: U-Nets built from scratch.
  * pix2pix_upsample: an upsampling block in the style of the pix2pix example.
  * unet_model: a U-Net with a frozen, pretrained MobileNetV2 encoder.
"""

import tensorflow as tf
|
|
|
|
|
def initialize_model(img_width, img_height, img_channels): |
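    """Build a plain U-Net for binary segmentation from scratch.

    Args:
        img_width: input width in pixels (must be divisible by 16).
        img_height: input height in pixels (must be divisible by 16).
        img_channels: number of input channels (e.g. 3 for RGB).

    Returns:
        An uncompiled tf.keras.Model that maps an image to a single-channel
        sigmoid mask of the same spatial size.
    """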
|
|
|
|
|
|
|
inputs = tf.keras.layers.Input((img_width, img_height, img_channels), name='input') |
|
|
|
|
|
    # Contracting path: each block is Conv -> Dropout -> Conv, then 2x2 max pooling.
    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block1_conv2d_1')(inputs)
|
c1 = tf.keras.layers.Dropout(0.1, name='block1_dropout')(c1) |
|
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block1_conv2d_2')(c1) |
|
p1 = tf.keras.layers.MaxPooling2D((2, 2), name='block1_max_pooling')(c1) |
|
|
|
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block2_conv2d_1')(p1) |
|
c2 = tf.keras.layers.Dropout(0.1, name='block2_dropout')(c2) |
|
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block2_conv2d_2')(c2) |
|
p2 = tf.keras.layers.MaxPooling2D((2, 2), name='block2_max_pooling')(c2) |
|
|
|
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block3_conv2d_1')(p2) |
|
c3 = tf.keras.layers.Dropout(0.2, name='block3_dropout')(c3) |
|
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block3_conv2d_2')(c3) |
|
p3 = tf.keras.layers.MaxPooling2D((2, 2), name='block3_max_pooling')(c3) |
|
|
|
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block4_conv2d_1')(p3) |
|
c4 = tf.keras.layers.Dropout(0.2, name='block4_dropout')(c4) |
|
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block4_conv2d_2')(c4) |
|
p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name='block4_max_pooling')(c4) |
|
|
|
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block5_conv2d_1')(p4) |
|
c5 = tf.keras.layers.Dropout(0.3, name='block5_dropout')(c5) |
|
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block5_conv2d_2')(c5) |
|
|
|
|
|
    # Expansive path: transposed convolutions upsample, and the matching encoder features are concatenated back in.
    u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same', name='block6_conv2d_transpose')(c5)
|
u6 = tf.keras.layers.concatenate([u6, c4], name='block6_concatenate') |
|
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block6_conv2d_1')(u6) |
|
c6 = tf.keras.layers.Dropout(0.2, name='block6_dropout')(c6) |
|
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block6_conv2d_2')(c6) |
|
|
|
u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same', name='block7_conv2d_transpose')(c6) |
|
u7 = tf.keras.layers.concatenate([u7, c3], name='block7_concatenate') |
|
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block7_conv2d_1')(u7) |
|
c7 = tf.keras.layers.Dropout(0.2, name='block7_dropout')(c7) |
|
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block7_conv2d_2')(c7) |
|
|
|
u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same', name='block8_conv2d_transpose')(c7) |
|
u8 = tf.keras.layers.concatenate([u8, c2], name='block8_concatenate') |
|
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block8_conv2d_1')(u8) |
|
c8 = tf.keras.layers.Dropout(0.1, name='block8_dropout')(c8) |
|
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block8_conv2d_2')(c8) |
|
|
|
u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same', name='block9_conv2d_transpose')(c8) |
|
u9 = tf.keras.layers.concatenate([u9, c1], axis=3, name='block9_concatenate') |
|
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block9_conv2d_1')(u9) |
|
c9 = tf.keras.layers.Dropout(0.1, name='block9_dropout')(c9) |
|
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block9_conv2d_2')(c9) |
|
|
|
    # 1x1 convolution with a sigmoid gives the per-pixel foreground probability.
    outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid', name='output')(c9)
|
|
|
    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
|
|
|
|
|
|
|
return model |
|
|
|
|
|
def initialize_model_v2(img_width, img_height, img_channels,
                        optimizer='adam', model_loss='binary_crossentropy', data_augm=False):
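    """Build a U-Net for binary segmentation and compile it.

    Args:
        img_width: input width in pixels (must be divisible by 16).
        img_height: input height in pixels (must be divisible by 16).
        img_channels: number of input channels.
        optimizer: optimizer passed to model.compile().
        model_loss: loss function passed to model.compile().
        data_augm: flag accepted by the signature but not used inside this function.

    Returns:
        A compiled tf.keras.Model with a single-channel sigmoid output.
    """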
|
|
|
    n = 1  # Width multiplier: scales the number of filters in every convolution.
|
inputs = tf.keras.layers.Input((img_width, img_height, img_channels)) |
|
|
|
c1 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(inputs) |
|
|
|
c1 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1) |
|
p1 = tf.keras.layers.MaxPooling2D((2, 2))(c1) |
|
|
|
c2 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1) |
|
|
|
c2 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2) |
|
p2 = tf.keras.layers.MaxPooling2D((2, 2))(c2) |
|
|
|
c3 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2) |
|
|
|
c3 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3) |
|
p3 = tf.keras.layers.MaxPooling2D((2, 2))(c3) |
|
|
|
c4 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3) |
|
|
|
c4 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4) |
|
p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c4) |
|
|
|
c5 = tf.keras.layers.Conv2D(256 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4) |
|
|
|
c5 = tf.keras.layers.Conv2D(256 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5) |
|
c5 = tf.keras.layers.Dropout(0.5)(c5) |
|
|
|
|
|
u6 = tf.keras.layers.Conv2DTranspose(128 * n, (2, 2), strides=(2, 2), padding='same')(c5) |
|
u6 = tf.keras.layers.concatenate([u6, c4]) |
|
c6 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6) |
|
|
|
c6 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6) |
|
|
|
u7 = tf.keras.layers.Conv2DTranspose(64 * n, (2, 2), strides=(2, 2), padding='same')(c6) |
|
u7 = tf.keras.layers.concatenate([u7, c3]) |
|
c7 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7) |
|
|
|
c7 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7) |
|
|
|
u8 = tf.keras.layers.Conv2DTranspose(32 * n, (2, 2), strides=(2, 2), padding='same')(c7) |
|
u8 = tf.keras.layers.concatenate([u8, c2]) |
|
c8 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8) |
|
|
|
c8 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8) |
|
|
|
u9 = tf.keras.layers.Conv2DTranspose(16 * n, (2, 2), strides=(2, 2), padding='same')(c8) |
|
u9 = tf.keras.layers.concatenate([u9, c1], axis=3) |
|
c9 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9) |
|
|
|
c9 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9) |
|
|
|
outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9) |
|
|
|
    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    # Compile with the requested optimizer and loss so the keyword arguments take effect.
    model.compile(optimizer=optimizer, loss=model_loss, metrics=['accuracy'])
|
|
|
|
|
|
|
return model |
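

# pix2pix_upsample() below accepts norm_type='instancenorm', which relies on an
# InstanceNormalization layer that is not defined in this module. A minimal
# implementation (following the layer used in the TensorFlow pix2pix example)
# is sketched here; tfa.layers.InstanceNormalization from TensorFlow Addons
# could be substituted instead.
class InstanceNormalization(tf.keras.layers.Layer):
    """Instance normalization layer (https://arxiv.org/abs/1607.08022)."""

    def __init__(self, epsilon=1e-5):
        super().__init__()
        self.epsilon = epsilon

    def build(self, input_shape):
        self.scale = self.add_weight(
            name='scale',
            shape=input_shape[-1:],
            initializer=tf.random_normal_initializer(1., 0.02),
            trainable=True)
        self.offset = self.add_weight(
            name='offset',
            shape=input_shape[-1:],
            initializer='zeros',
            trainable=True)

    def call(self, x):
        # Normalize each example over its spatial dimensions only.
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        return self.scale * (x - mean) * inv + self.offset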
|
|
|
|
|
|
|
def pix2pix_upsample(filters, size, norm_type='batchnorm', apply_dropout=False): |
|
"""Upsamples an input. |
|
Conv2DTranspose => Batchnorm => Dropout => Relu |
|
Args: |
|
filters: number of filters |
|
size: filter size |
|
norm_type: Normalization type; either 'batchnorm' or 'instancenorm'. |
|
apply_dropout: If True, adds the dropout layer |
|
Returns: |
|
Upsample Sequential Model |
|
""" |
|
|
|
initializer = tf.random_normal_initializer(0., 0.02) |
|
|
|
result = tf.keras.Sequential() |
|
result.add( |
|
tf.keras.layers.Conv2DTranspose(filters, size, strides=2, |
|
padding='same', |
|
kernel_initializer=initializer, |
|
use_bias=False)) |
|
|
|
if norm_type.lower() == 'batchnorm': |
|
result.add(tf.keras.layers.BatchNormalization()) |
|
elif norm_type.lower() == 'instancenorm': |
|
result.add(InstanceNormalization()) |
|
|
|
if apply_dropout: |
|
result.add(tf.keras.layers.Dropout(0.5)) |
|
|
|
result.add(tf.keras.layers.ReLU()) |
|
|
|
return result |
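

# unet_model() below compiles with a dice_coef metric that is not defined in
# this module. A standard soft-Dice implementation is assumed here; the
# smoothing constant is a common default, not taken from the original source.
def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient for binary masks."""
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(tf.cast(y_pred, tf.float32), [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)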
|
|
|
|
|
|
|
def unet_model(output_channels: int, input_shape=(128, 128, 3),
               optimizer='adam', model_loss='binary_crossentropy'):
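    """Build and compile a U-Net whose encoder is an ImageNet-pretrained MobileNetV2.

    Skip connections are taken from intermediate MobileNetV2 activations, the
    encoder is frozen, and the decoder uses pix2pix-style upsample blocks. The
    final Conv2DTranspose produces raw logits (no activation), so the loss and
    metrics should be chosen with that in mind.

    Returns:
        A compiled tf.keras.Model.
    """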
|
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape, include_top=False) |
|
|
|
|
|
    # MobileNetV2 activations used as skip connections, from highest to lowest resolution.
    layer_names = [
|
'block_1_expand_relu', |
|
'block_3_expand_relu', |
|
'block_6_expand_relu', |
|
'block_13_expand_relu', |
|
'block_16_project', |
|
] |
|
base_model_outputs = [base_model.get_layer(name).output for name in layer_names] |
|
|
|
|
|
down_stack = tf.keras.Model(inputs=base_model.input, outputs=base_model_outputs) |
|
|
|
    # Freeze the pretrained encoder so only the decoder is trained.
    down_stack.trainable = False
|
|
|
up_stack = [ |
|
pix2pix_upsample(512, 3), |
|
pix2pix_upsample(256, 3), |
|
pix2pix_upsample(128, 3), |
|
pix2pix_upsample(64, 3), |
|
] |
|
|
|
inputs = tf.keras.layers.Input(shape=input_shape) |
|
|
|
|
|
    # The deepest encoder activation feeds the decoder; the rest become skip connections.
    skips = down_stack(inputs)
|
x = skips[-1] |
|
skips = reversed(skips[:-1]) |
|
|
|
|
|
for up, skip in zip(up_stack, skips): |
|
x = up(x) |
|
concat = tf.keras.layers.Concatenate() |
|
x = concat([x, skip]) |
|
|
|
|
|
    # Final 2x upsampling back to the input resolution; outputs raw logits.
    last = tf.keras.layers.Conv2DTranspose(
|
filters=output_channels, kernel_size=3, strides=2, |
|
padding='same') |
|
|
|
x = last(x) |
|
|
|
model = tf.keras.Model(inputs=inputs, outputs=x) |
|
|
|
    model.compile(optimizer=optimizer,
                  loss=model_loss,
                  metrics=['acc', dice_coef])
|
|
|
|
|
return model |
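

# Minimal usage sketch (illustrative only, not part of the original interface):
# build each U-Net variant and print its summary. The 128x128x3 input size is
# an assumption chosen to satisfy the divisible-by-16 requirement; unet_model
# downloads MobileNetV2 weights on first use.
if __name__ == '__main__':
    scratch_unet = initialize_model(128, 128, 3)
    scratch_unet.summary()

    mobilenet_unet = unet_model(output_channels=1)
    mobilenet_unet.summary()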
|
|