Commit a56f81ac authored by Lafnoune Imane's avatar Lafnoune Imane

professor's comments

parent 689ccdd2
@@ -41,14 +41,16 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
     # flatten the volume, then FC => RELU => BN => DROPOUT
     x = Flatten()(x)
     x = Dense(16)(x)
-    x = Activation("relu")(x)
+    x = Activation("relu")(x)  # sigmoid or tanh
     x = BatchNormalization(axis=chanDim)(x)
     x = Dropout(0.5)(x)
     # apply another FC layer, this one to match the number of nodes
     # coming out of the MLP
     x = Dense(4)(x)
-    x = Activation("relu")(x)
+    x = Activation("relu")(x)  # sigmoid or tanh
     # check to see if the regression node should be added
+    # the next layer is useless for the hybrid model; keep it for the standalone CNN model
     if regress:
         x = Dense(1, activation="linear")(x)
     # construct the CNN
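
The "# sigmoid or tanh" notes above suggest experimenting with the activation of the two fully-connected layers. Below is a minimal sketch of one way to run that comparison, with the activation exposed as a parameter instead of hard-coded. The activation argument, the function name create_cnn_head, and the single Conv2D stem standing in for the filters loop (not shown in this hunk) are assumptions, not part of the commit:

# Hypothetical variant of the dense head, for comparing the suggested activations.
from tensorflow.keras.layers import (Input, Conv2D, Flatten, Dense,
                                     Activation, BatchNormalization, Dropout)
from tensorflow.keras.models import Model

def create_cnn_head(width=240, height=240, depth=1, activation="relu", regress=False):
    chanDim = -1
    inputs = Input(shape=(height, width, depth))
    x = Conv2D(32, (3, 3), padding="same", activation="relu")(inputs)  # stand-in stem
    x = Flatten()(x)
    x = Dense(16)(x)
    x = Activation(activation)(x)  # pass "sigmoid" or "tanh" to test the suggestion
    x = BatchNormalization(axis=chanDim)(x)
    x = Dropout(0.5)(x)
    x = Dense(4)(x)
    x = Activation(activation)(x)
    if regress:
        x = Dense(1, activation="linear")(x)
    return Model(inputs, x)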
@@ -59,28 +61,28 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
 def create_mlp(dim, regress=True):
     # define our MLP network
     model = Sequential()
-    model.add(Dense(8, input_dim=dim, activation="relu"))
-    model.add(Dense(4, activation="relu"))
+    model.add(Dense(8, input_dim=dim, activation="relu"))  # tanh
+    model.add(Dense(4, activation="relu"))  # tanh
     # add dense for regression
-    model.add(Dense(1, activation="linear"))
+    model.add(Dense(1, activation="linear"))  # tanh
     # return our model
     return model
 
-def create_mlp2(dim, regress=True):
+def create_mlp2(dim, regress=True):  # better than mlp
     model = Sequential()
     model.add(GaussianNoise(0.2, input_shape=(dim,)))
     model.add(Dense(8, activation="relu"))
     model.add(Dense(4, activation="relu"))
     # add dense for regression
-    model.add(Dense(1))
+    model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not down to 1 unit; same for the mlp used in the hybrid
     return model
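
Following the comment on that last Dense(1), here is a hedged sketch of what removing it might look like, so the MLP ends in a small feature vector the hybrid model can concatenate rather than collapsing to a single unit too early. The function name create_mlp2_features is hypothetical:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GaussianNoise

def create_mlp2_features(dim):
    # same body as create_mlp2, minus the final Dense(1) flagged above
    model = Sequential()
    model.add(GaussianNoise(0.2, input_shape=(dim,)))
    model.add(Dense(8, activation="relu"))
    model.add(Dense(4, activation="relu"))  # output stays a 4-unit feature vector
    return model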
 def create_hybrid(nb_attributes, shape=(240,240,1)):
     # create cnn and mlp models
     mlp = create_mlp(nb_attributes)
     cnn = create_cnn(*shape)
-    combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)
+    combinedInput = concatenate([mlp.output, cnn.output])
+    x = Dense(4, activation="relu")(combinedInput)  # tanh
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model
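
For reference, a minimal usage sketch of the hybrid model; the attribute count, optimiser, loss, and the X_tabular/X_images names are assumptions, not part of this commit:

from tensorflow.keras.optimizers import Adam

# build the two-branch model: tabular attributes into the MLP, 240x240x1 images into the CNN
model = create_hybrid(nb_attributes=5, shape=(240, 240, 1))
model.compile(optimizer=Adam(learning_rate=1e-3), loss="mse")
# model.fit([X_tabular, X_images], y_targets, epochs=10)  # inputs ordered [mlp.input, cnn.input]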