How to use the keras.layers.normalization.BatchNormalization function in keras

To help you get started, we’ve selected a few keras examples based on popular ways BatchNormalization is used in public projects.

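A minimal standalone sketch of the most common pattern (Conv → BatchNormalization → ReLU). This assumes a Keras version where the layer is exported from keras.layers; in older releases it is imported from keras.layers.normalization instead.

# Minimal sketch: Conv2D -> BatchNormalization -> ReLU.
# Assumes keras.layers exports BatchNormalization (older versions:
# from keras.layers.normalization import BatchNormalization).
from keras.layers import Input, Conv2D, BatchNormalization, Activation
from keras.models import Model

inputs = Input(shape=(32, 32, 3))
x = Conv2D(16, (3, 3), padding='same', use_bias=False)(inputs)  # bias is redundant before BN
x = BatchNormalization()(x)  # normalizes over the last (channel) axis by default
x = Activation('relu')(x)
model = Model(inputs, x)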
github ljuvela / ResGAN / train_pls_noisegan.py
    pls_input = Input(shape=(input_dim,), name="pls_input")
    noise_input = Input(shape=(input_dim,), name="noise_input")
    vuv_input = Input((1,), name="vuv_input")

    pls = Reshape((input_dim, 1))(pls_input)    
    noise = Reshape((input_dim, 1))(noise_input)
    vuv = Reshape((1,1))(vuv_input)
    vuv = UpSampling1D(size=input_dim)(vuv) # is this needed or is broadcasting automatic?

    x = concatenate([pls, noise], axis=2) # concat as different channels

    x = Convolution1D(filters=100,
                      kernel_size=15,
                      padding='same',
                      strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2) # concat as different channels

    x = Convolution1D(filters=100,
                      kernel_size=15,
                      padding='same',
                      strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2) # concat as different channels

    x = Convolution1D(filters=100,
                      kernel_size=15,
                      padding='same',
                      strides=1)(x)
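The block above repeats a fixed Conv1D → BatchNormalization → LeakyReLU unit, re-concatenating the pls branch before each one. BatchNormalization() with no arguments normalizes over the last axis, which here is the channel axis of the 1D convolution output. A sketch of the repeated unit as a helper (the helper name is illustrative, not from the original file; it assumes the same imports):

def conv_bn_lrelu(x, filters=100, kernel_size=15):
    # Illustrative helper: one Conv1D block followed by BatchNormalization
    # (default axis=-1, i.e. channels-last) and LeakyReLU, as repeated above.
    x = Convolution1D(filters=filters, kernel_size=kernel_size,
                      padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    return LeakyReLU(0.1)(x)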
github statech / resnet / resnet / resnet101.py
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Conv2D(nb_filter2, (kernel_size, kernel_size),
               name=conv_name_base + '2b', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Conv2D(nb_filter3, (1, 1),
               name=conv_name_base + '2c', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                      name=conv_name_base + '1', use_bias=False)(input_tensor)
    shortcut = BatchNormalization(epsilon=eps, axis=bn_axis,
                                  name=bn_name_base + '1')(shortcut)
    shortcut = Scale(axis=bn_axis, name=scale_name_base + '1')(shortcut)

    x = add([x, shortcut], name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x
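Here bn_axis selects the channel axis that BatchNormalization normalizes over. A common way to derive it from the backend configuration (a sketch, assuming the Keras 2 backend API):

from keras import backend as K

# Sketch: 'channels_last' (the TensorFlow default) puts channels on axis 3
# of a 4D tensor; 'channels_first' (the older Theano convention) uses axis 1.
if K.image_data_format() == 'channels_last':
    bn_axis = 3
else:
    bn_axis = 1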
github costapt / vess2ret / models.py
def BatchNorm(mode=2, axis=1, **kwargs):
    """Convenience wrapper for BatchNormalization layers."""
    if KERAS_2:
        # Keras 2 removed the `mode` argument.
        return BatchNormalization(axis=axis, **kwargs)
    else:
        return BatchNormalization(mode=mode, axis=axis, **kwargs)
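The wrapper papers over an API change: Keras 1's BatchNormalization took a mode argument (mode=2 meant feature-wise normalization using per-batch statistics), which Keras 2 removed. Callers then stay version-agnostic (a sketch; KERAS_2 is a flag the module defines elsewhere, and the layer name is illustrative):

# Sketch: use the wrapper instead of BatchNormalization directly, so the
# same model code runs under both Keras 1 and Keras 2.
x = BatchNorm(axis=1, name='block1_bn')(x)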
github MoyanZitto / keras-scripts / resnet50 / res_net50.py
        block: str like 'a', 'b'...; current block
        kernel_size: default 3, the kernel size of the middle conv layer on the main path
    """
    nb_filter1, nb_filter2, nb_filter3 = nb_filter

    out = Convolution2D(nb_filter1, 1, 1, name='res'+str(stage)+block+'_branch2a')(input_tensor)
    out = BatchNormalization(axis=1, name='bn'+str(stage)+block+'_branch2a')(out)
    out = Activation('relu')(out)

    out = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same',
                        name='res'+str(stage)+block+'_branch2b')(out)
    out = BatchNormalization(axis=1, name='bn'+str(stage)+block+'_branch2b')(out)
    out = Activation('relu')(out)

    out = Convolution2D(nb_filter3, 1, 1, name='res'+str(stage)+block+'_branch2c')(out)
    out = BatchNormalization(axis=1, name='bn'+str(stage)+block+'_branch2c')(out)

    out = merge([out, input_tensor], mode='sum')
    out = Activation('relu')(out)
    return out
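This snippet is written against the Keras 1 API: Convolution2D(n, 1, 1), border_mode, and merge([...], mode='sum'). Under Keras 2 the residual sum at the end would be spelled with add (a sketch, not from the original repo):

# Keras 2 equivalent of the final merge above (illustrative):
out = add([out, input_tensor])  # replaces merge([out, input_tensor], mode='sum')
out = Activation('relu')(out)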

github xmengli999 / H-DenseUNet / hybridnet.py
    ac4 = Activation('relu')(bn7)
    conv9 = Conv3D(256, (3, 3, 3), padding="same", kernel_initializer="normal")(ac4)
    bn8 = BatchNormalization()(conv9)
    pad5 = Conv3D(256, (1, 1, 1), padding="same", kernel_initializer="normal")(sumb4_1)
    BN5 = BatchNormalization()(pad5)
    sumb5 = add([BN5, bn8])
    res4 = Activation('relu')(sumb5)

    up1 = UpSampling3D(size=(2, 2, 1))(res4)
    pad6 = Conv3D(256, (1, 1, 1), padding="same", kernel_initializer="normal")(res1)
    BN6 = BatchNormalization()(pad6)
    sumb6 = add([BN6, up1])

    # residual block
    conv10 = Conv3D(128, (3, 3, 3), padding="same", kernel_initializer="normal")(sumb6)
    bn9 = BatchNormalization()(conv10)
    ac5 = Activation('relu')(bn9)
    conv11 = Conv3D(128, (3, 3, 3), padding="same", kernel_initializer="normal")(ac5)
    bn10 = BatchNormalization()(conv11)
    pad7 = Conv3D(128, (1, 1, 1), padding="same", kernel_initializer="normal")(sumb6)
    BN7 = BatchNormalization()(pad7)
    sumb7 = add([BN7, bn10])
    res5 = Activation('relu')(sumb7)

    up2 = UpSampling3D(size=(2, 2, 1))(res5)
    pad8 = Conv3D(128, (1, 1, 1), padding="same", kernel_initializer="normal")(ac0)
    BN8 = BatchNormalization()(pad8)
    sumb8 = add([BN8, up2])

    # residual block
    conv12 = Conv3D(64, (3, 3, 3), padding="same", kernel_initializer="normal")(sumb8)
    bn11 = BatchNormalization()(conv12)
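Each block above follows one pattern: two Conv3D → BatchNormalization stages on the main path, a 1×1×1 Conv3D → BatchNormalization projection on the shortcut, then add and ReLU. A sketch of that pattern as a helper (illustrative names, assuming the same imports as the file):

def residual_block_3d(x, shortcut_src, filters):
    # Illustrative refactoring of the repeated pattern above, not from the file.
    y = Conv3D(filters, (3, 3, 3), padding="same", kernel_initializer="normal")(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv3D(filters, (3, 3, 3), padding="same", kernel_initializer="normal")(y)
    y = BatchNormalization()(y)
    s = Conv3D(filters, (1, 1, 1), padding="same", kernel_initializer="normal")(shortcut_src)
    s = BatchNormalization()(s)
    return Activation('relu')(add([s, y]))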
github raun1 / MICCAI2018---Complementary_Segmentation_Network-Raw-Code / src / comp_net_raw.py
    merge10=concatenate([up7,conv7a,conv7b,conv7c,conv7d,conv7e,conv7f,conv7g,conv7h,conv7i,conv7j,conv7k])
    conv7l=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(merge10)
    conv7l = bn()(conv7l)

    merge11=concatenate([up7,conv7a,conv7b,conv7c,conv7d,conv7e,conv7f,conv7g,conv7h,conv7i,conv7j,conv7k,conv7l])
    conv7m=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(merge11)
    conv7m = bn()(conv7m)


    merge12=concatenate([up7,conv7a,conv7b,conv7c,conv7d,conv7e,conv7f,conv7g,conv7h,conv7i,conv7j,conv7k,conv7l,conv7m])
    conv7n=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(merge12)
    conv7n = bn()(conv7n)

    merge13=concatenate([up7,conv7a,conv7b,conv7c,conv7d,conv7e,conv7f,conv7g,conv7h,conv7i,conv7j,conv7k,conv7l,conv7m,conv7n])
    conv7o=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(merge13)
    conv7o = bn()(conv7o)

    merge14=concatenate([up7,conv7a,conv7b,conv7c,conv7d,conv7e,conv7f,conv7g,conv7h,conv7i,conv7j,conv7k,conv7l,conv7m,conv7n,conv7o])
    conv7p=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(merge14)
    conv7p = bn()(conv7p)
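Each convolution receives the concatenation of up7 and every previous output, i.e. DenseNet-style dense connectivity. The same wiring written as a loop (a sketch with illustrative names; it assumes the file's bn alias and imports):

# Illustrative: the conv7l..conv7p stages above, written as a loop.
features = [up7, conv7a, conv7b, conv7c, conv7d, conv7e, conv7f,
            conv7g, conv7h, conv7i, conv7j, conv7k]
for _ in range(5):
    y = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
               kernel_regularizer=regularizers.l2(l2_lambda))(concatenate(features))
    y = bn()(y)
    features.append(y)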
github driving-behavior / DBNet / models / inception_v4_pm.py
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1), bias=False):
    """
    Utility function to apply conv + BN.
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    channel_axis = -1
    x = Convolution2D(nb_filter, (nb_row, nb_col),
                      strides=subsample,
                      padding=border_mode,
                      use_bias=bias)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
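Typical calls, as in the Inception family (a sketch matching the signature above):

# A 3x3 conv with BN and ReLU, stride 1, 'same' padding:
x = conv2d_bn(x, 64, 3, 3)
# A strided 'valid' variant:
x = conv2d_bn(x, 96, 3, 3, border_mode='valid', subsample=(2, 2))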
github BIGBALLON / cifar-10-cnn / 6_ResNeXt / ResNeXt_keras.py
def bn_relu(x):
    x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
    x = Activation('relu')(x)
    return x
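The helper is applied after each convolution. Note that Keras's momentum is the running-average decay, so momentum=0.9 here corresponds to the 0.1 used in Torch-style BN defaults, and epsilon=1e-5 matches those defaults. Usage (a sketch):

# Sketch: compose with a convolution into the usual conv-BN-ReLU stack.
x = Conv2D(64, (3, 3), padding='same', use_bias=False)(x)
x = bn_relu(x)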
github xingjian-f / DeepLearning-OCR / architecture / vgg_merge.py
	conv2_2 = Convolution2D(128, 3, 3, border_mode='same', activation='relu')(conv2_1)
	bn2 = BatchNormalization(mode=0, axis=1)(conv2_2)
	pool2 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn2)
	drop2 = Dropout(0.5)(pool2)
	# 3 conv
	conv3_1 = Convolution2D(256, 3, 3, border_mode='same', activation='relu')(drop2)
	conv3_2 = Convolution2D(256, 3, 3, border_mode='same', activation='relu')(conv3_1)
	conv3_3 = Convolution2D(256, 3, 3, border_mode='same', activation='relu')(conv3_2)
	bn3 = BatchNormalization(mode=0, axis=1)(conv3_3)
	pool3 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn3)
	drop3 = Dropout(0.5)(pool3)
	# 4 conv
	conv4_1 = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(drop3)
	conv4_2 = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(conv4_1)
	conv4_3 = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(conv4_2)
	bn4 = BatchNormalization(mode=0, axis=1)(conv4_3)
	pool4 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn4)
	drop4 = Dropout(0.5)(pool4)
	# 5 conv
	conv5_1 = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(drop4)
	conv5_2 = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(conv5_1)
	conv5_3 = Convolution2D(512, 3, 3, border_mode='same', activation='relu')(conv5_2)
	bn5 = BatchNormalization(mode=0, axis=1)(conv5_3)
	pool5 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn5)
	drop5 = Dropout(0.5)(pool5)
	# flatten
	flat = Flatten()(drop5)
	# 1 dense
	dense1 = Dense(4096, activation='relu')(flat)
	bn6 = BatchNormalization(mode=0, axis=1)(dense1)
	drop6 = Dropout(0.5)(bn6)
	# 2 dense
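This file is Keras 1 throughout: Convolution2D(n, 3, 3), border_mode, and BatchNormalization(mode=0, axis=1), where mode=0 is feature-wise normalization with running statistics and axis=1 assumes channels-first data. One block in the Keras 2 spelling, for comparison (a sketch; variable names are illustrative):

# Keras 2 equivalent of a conv block above (illustrative):
conv = Conv2D(128, (3, 3), padding='same', activation='relu')(prev)
bn = BatchNormalization(axis=1)(conv)  # the 'mode' argument no longer exists
pool = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(bn)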
github philipperemy / very-deep-convnets-raw-waveforms / models.py
def m5(num_classes=10):
    print('Using Model M5')
    m = Sequential()
    m.add(Conv1D(128,
                 input_shape=[AUDIO_LENGTH, 1],
                 kernel_size=80,
                 strides=4,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=regularizers.l2(l=0.0001)))
    m.add(BatchNormalization())
    m.add(Activation('relu'))
    m.add(MaxPooling1D(pool_size=4, strides=None))
    m.add(Conv1D(128,
                 kernel_size=3,
                 strides=1,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=regularizers.l2(l=0.0001)))
    m.add(BatchNormalization())
    m.add(Activation('relu'))
    m.add(MaxPooling1D(pool_size=4, strides=None))
    m.add(Conv1D(256,
                 kernel_size=3,
                 strides=1,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=regularizers.l2(l=0.0001)))
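The excerpt cuts off inside the third conv block; construction continues in the original file. Instantiating and compiling the model might look like this (a sketch; the optimizer and loss are illustrative, AUDIO_LENGTH comes from the surrounding module, and it assumes m5() ends by returning the Sequential model m):

# Sketch: build and compile the M5 model for 10-class classification.
model = m5(num_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])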