from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import GaussianNoise
from tensorflow import TensorShape
import numpy as np
from sklearn.model_selection import train_test_split


def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False, type_cnn='simple'):
    # initialize the input shape and channel dimension, assuming
    # TensorFlow/channels-last ordering
    inputShape = (height, width, depth)
    chanDim = -1


    # define the model input
    inputs = Input(shape=inputShape)
    # loop over the number of filters
    for (i, f) in enumerate(filters):
        # if this is the first CONV layer then set the input
        # appropriately
        if i == 0:
            x = inputs
        # CONV => RELU => BN => POOL
        x = Conv2D(f, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

    # flatten the volume, then FC => TANH => BN => DROPOUT
    x = Flatten()(x)
    x = Dense(16)(x)
    x = Activation("tanh")(x)  # sigmoid or tanh
    x = BatchNormalization(axis=chanDim)(x)
    x = Dropout(0.5)(x)
    # apply another FC layer, this one to match the number of nodes
    # coming out of the MLP
    x = Dense(4)(x)
    x = Activation("tanh")(x)  # sigmoid or tanh
    # check to see if the regression node should be added

    # the following layer is useless for the hybrid model; keep it for the standalone CNN
    if type_cnn == 'simple':
        if regress:
            x = Dropout(0.5)(x)
            x = Dense(1, activation="linear")(x)
    # construct the CNN
    model = Model(inputs, x)
    # return the CNN
    return model
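
# Example usage (illustrative sketch; the 240x240 single-channel shape is an
# assumption borrowed from create_hybrid's default, not a requirement):
#   cnn = create_cnn(240, 240, 1, regress=True)
#   cnn.compile(optimizer="adam", loss="mse")
#   cnn.summary()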

def create_mlp(dim, regress=True):
    # define our MLP network
    model = Sequential()
    model.add(Dense(8, input_dim=dim, activation="tanh"))
    model.add(Dropout(0.3))
    model.add(Dense(4, activation="tanh"))
    # add dense for regression
    #model.add(Dense(1, activation="linear"))
    # return our model
    return model

def create_mlp2(dim, regress=True):  # better than create_mlp
    model = Sequential()
    model.add(GaussianNoise(0.2, input_shape=(dim,)))
    model.add(Dropout(0.5))
    #modified from relu to tanh
    model.add(Dense(8, activation="tanh"))
    model.add(Dropout(0.5))
    model.add(Dense(4, activation="tanh"))
    # add dense for regression
    #model.add(Dense(1))  # layer to remove: too many layers in this model, and above
    # all it must not end in a single unit; same for the MLP used in the hybrid
    return model
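
# Example usage (illustrative sketch; the attribute count is a made-up example):
#   mlp = create_mlp2(dim=5)
#   mlp.summary()  # ends in a 4-unit tanh layer, meant to be concatenated in a hybrid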

def create_hybrid(nb_attributes, shape=(240, 240, 1)):
    # create cnn and mlp models
    mlp = create_mlp(nb_attributes)
    cnn = create_cnn(*shape, type_cnn='hybrid')
    combinedInput = concatenate([mlp.output, cnn.output]) 
    x = Dense(4, activation="tanh")(combinedInput)
    x = Dropout(0.5)(x)
    x = Dense(1, activation="linear")(x)
    model = Model(inputs=[mlp.input, cnn.input], outputs=x)
    return model
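
# Example usage (illustrative sketch; shapes, attribute count and variable names
# are assumptions):
#   model = create_hybrid(nb_attributes=5, shape=(240, 240, 1))
#   model.compile(optimizer="adam", loss="mse")
#   model.fit([X_tab, X_img], y, epochs=10)  # two inputs: (N, 5) tabular + (N, 240, 240, 1) images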



def multify_weights(kernel, out_channels):
    # Expand the kernel's input-channel dimension to cover the new input channels:
    # average over the existing channels, then tile that mean, taking
    # (kh, kw, in_c, filters) -> (kh, kw, out_channels, filters)
    mean_1d = np.mean(kernel, axis=-2).reshape(kernel[:, :, -1:, :].shape)
    tiled = np.tile(mean_1d, (out_channels, 1))
    return tiled
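
# Shape check (hypothetical numbers): for a first-layer ImageNet kernel of shape
# (3, 3, 3, 32) and out_channels=1, the mean over axis -2 reshapes to (3, 3, 1, 32)
# and the tiled result is (3, 3, 1, 32); concatenating it in weightify below yields
# the (3, 3, 4, 32) kernel needed for 4-channel input.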



def weightify(model_orig, custom_model, layer_modify, input_channel):
    # Loop through layers of both original model 
    # and custom model and copy over weights 
    # layer_modify refers to first convolutional layer
    layer_to_modify = [layer_modify]

    conf = custom_model.get_config()
    layer_names = [conf['layers'][x]['name'] for x in range(len(conf['layers']))]

    for layer in model_orig.layers:
        if layer.name in layer_names:
            if layer.get_weights():
                target_layer = custom_model.get_layer(layer.name)
                #print(len(layer.get_weights()))
                if layer.name in layer_to_modify:
                    kernels = layer.get_weights()[0]
                    #biases  = layer.get_weights()[1]

                    kernels_extra_channel = np.concatenate((kernels,
                                                          multify_weights(kernels, input_channel - 3)),
                                                          axis=-2)

                    # load the expanded kernel into the matching layer of custom_model
                    # (assumed to be built with `input_channel` inputs and use_bias=False);
                    # building a fresh, detached Conv2D here would leave custom_model's
                    # layer at its random initialization
                    target_layer.set_weights([kernels_extra_channel])
                    #target_layer.set_weights([kernels_extra_channel, biases])
                    #target_layer.trainable = False

                else:
                    target_layer.set_weights(layer.get_weights())
                    target_layer.trainable = False
    return custom_model
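
# Illustrative sketch (backbone, builder and layer name are assumptions, not from
# this file): adapt an ImageNet model to 4-channel input by cloning its
# architecture with a 4-channel first conv, then copying weights across:
#   import tensorflow as tf
#   base = tf.keras.applications.ResNet50(weights="imagenet", include_top=False)
#   custom = build_resnet50_4ch()  # hypothetical clone built for (240, 240, 4) input
#   custom = weightify(base, custom, "conv1_conv", input_channel=4)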

def create_transfer_learning(new_model, custom_model, modify_name, input_channel=4, weights=False, hybrid=False):
    # create cnn with transfer learning  
    new = weightify(new_model, custom_model, modify_name, input_channel)
    x = new.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x) 
    if not hybrid:
        x = Dense(1, activation='linear')(x)
    model = Model(new.input, x)

    if weights:
        for layer in new.layers:
            layer.trainable = False

    return model
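
# Example usage (illustrative sketch; `base`, `custom` and the layer name follow
# the hypothetical weightify sketch above):
#   model = create_transfer_learning(base, custom, "conv1_conv", input_channel=4, weights=True)
#   model.compile(optimizer="adam", loss="mse")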

def create_hybrid_transfer(nb_attributes, new_model, custom_model, modify_name, input_channel, weight=False):
    # create cnn and mlp models
    mlp = create_mlp(nb_attributes)
    cnn = create_transfer_learning(new_model, custom_model, modify_name, input_channel, weights=weight, hybrid=True)
    combinedInput = concatenate([mlp.output, cnn.output])
    x = Dense(4, activation="tanh")(combinedInput)
    x = Dropout(0.5)(x)
    x = Dense(1, activation="linear")(x)
    model = Model(inputs=[mlp.input, cnn.input], outputs=x)
    return model

def fit_and_evaluate(t_x, val_x, t_y, val_y, EPOCHS=30, BATCH_SIZE=8, model=None, es=None, cp=None):
    """
    `es`: EarlyStopping keras callback
    `cp`: ModelCheckpoint keras callback
    """
    # only pass the callbacks that were actually supplied
    callbacks = [c for c in (es, cp) if c is not None]
    results = model.fit(t_x, t_y, epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=callbacks,
                        verbose=1, validation_split=0.1)
    print("Val Score: ", model.evaluate(val_x, val_y))
    return results
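
# Example usage (illustrative sketch; file name and patience are assumptions):
#   from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
#   es = EarlyStopping(monitor="val_loss", patience=5, restore_best_weights=True)
#   cp = ModelCheckpoint("best_model.h5", monitor="val_loss", save_best_only=True)
#   history = fit_and_evaluate(t_x, val_x, t_y, val_y, model=model, es=es, cp=cp)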


def create_regression(training_df_X, training_df_y, test_df_X, test_df_y, model):
    # fit a scikit-learn style estimator; for regressors, `score` returns R^2
    model.fit(training_df_X, training_df_y)
    print('Training score :', model.score(training_df_X, training_df_y))
    print('Test score :', model.score(test_df_X, test_df_y))
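
# Example usage (illustrative sketch with a scikit-learn regressor):
#   from sklearn.linear_model import Ridge
#   create_regression(X_train, y_train, X_test, y_test, Ridge(alpha=1.0))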