Commit 04389b6c authored by Billy Amélie

Merge branch 'AmelieBranch' into 'master'

AmelieBranch merge

See merge request !17
parents a56f81ac c0a364d5
@@ -17,7 +17,7 @@ from tensorflow import TensorShape
 from sklearn.model_selection import train_test_split
-def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
+def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False, type_cnn='simple'):
     # initialize the input shape and channel dimension, assuming
     # TensorFlow/channels-last ordering
     inputShape = (height, width, depth)
@@ -41,18 +41,19 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
     # flatten the volume, then FC => RELU => BN => DROPOUT
     x = Flatten()(x)
     x = Dense(16)(x)
-    x = Activation("relu")(x)  # sigmoid or tanh
+    x = Activation("tanh")(x)  # sigmoid or tanh
     x = BatchNormalization(axis=chanDim)(x)
     x = Dropout(0.5)(x)
     # apply another FC layer, this one to match the number of nodes
     # coming out of the MLP
     x = Dense(4)(x)
-    x = Activation("relu")(x)  # sigmoid or tanh
+    x = Activation("tanh")(x)  # sigmoid or tanh
     # check to see if the regression node should be added
+    # the following layer is useless for the hybrid; keep it for the standalone CNN model
-    if regress:
-        x = Dense(1, activation="linear")(x)
+    if type_cnn == 'simple':
+        if regress:
+            x = Dense(1, activation="linear")(x)
     # construct the CNN
     model = Model(inputs, x)
     # return the CNN
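
A minimal sketch of how the new type_cnn flag behaves, assuming the 240×240×1 input shape that create_hybrid passes in: 'simple' keeps the optional linear regression head, while 'hybrid' stops at the 4-unit tanh layer so the fusion model can add its own head.

    # Sketch only, not part of this diff; shapes are assumptions.
    cnn_alone = create_cnn(240, 240, 1, regress=True, type_cnn='simple')
    print(cnn_alone.output_shape)   # (None, 1): regression head kept
    cnn_branch = create_cnn(240, 240, 1, type_cnn='hybrid')
    print(cnn_branch.output_shape)  # (None, 4): ends at the 4-unit tanh layer
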
@@ -61,28 +61,32 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
 def create_mlp(dim, regress=True):
     # define our MLP network
     model = Sequential()
-    model.add(Dense(8, input_dim=dim, activation="relu"))  # tanh
-    model.add(Dense(4, activation="relu"))  # tanh
+    model.add(Dense(8, input_dim=dim, activation="tanh"))  # tanh
+    model.add(Dropout(0.3))
+    model.add(Dense(4, activation="tanh"))  # tanh
     # add dense for regression
-    model.add(Dense(1, activation="linear"))  # tanh
+    #model.add(Dense(1, activation="linear"))  # tanh
     # return our model
     return model
 def create_mlp2(dim, regress=True):  # better than mlp
     model = Sequential()
     model.add(GaussianNoise(0.2, input_shape=(dim,)))
-    model.add(Dense(8, activation="relu"))
-    model.add(Dense(4, activation="relu"))
-    model.add(Dropout(0.5))
+    # modified from relu to tanh
+    model.add(Dense(8, activation="tanh"))
+    model.add(Dropout(0.5))
+    model.add(Dense(4, activation="tanh"))
     # add dense for regression
-    model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not of size 1; same for the MLP in the hybrid
+    #model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not of size 1; same for the MLP in the hybrid
     return model
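
With the trailing Dense(1) commented out, both MLP variants now end at the 4-unit tanh layer, which is the feature size the hybrid's concatenate step expects. A quick check, assuming this file's imports plus numpy (dim=10 is an arbitrary example):

    mlp2 = create_mlp2(dim=10)
    print(mlp2.output_shape)  # expected: (None, 4)
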
 def create_hybrid(nb_attributes, shape=(240, 240, 1)):
     # create cnn and mlp models
     mlp = create_mlp(nb_attributes)
-    cnn = create_cnn(*shape)
+    cnn = create_cnn(*shape, type_cnn='hybrid')
     combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)  # tanh
+    x = Dense(4, activation="tanh")(combinedInput)  # tanh
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model
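
For reference, an end-to-end sketch of training the hybrid on its two inputs; the attribute count, dummy arrays, loss, and optimizer settings below are placeholder assumptions, not values from this merge request.

    import numpy as np
    from tensorflow.keras.optimizers import Adam

    # Dummy data just to exercise the two-input pipeline (shapes are assumptions).
    X_attr = np.random.rand(16, 10).astype("float32")          # (n_samples, nb_attributes)
    X_img = np.random.rand(16, 240, 240, 1).astype("float32")  # (n_samples, H, W, C)
    y = np.random.rand(16).astype("float32")                   # continuous target

    model = create_hybrid(nb_attributes=10)
    model.compile(loss="mean_squared_error", optimizer=Adam(1e-3))
    model.fit([X_attr, X_img], y, epochs=1, batch_size=4)      # smoke-test run
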
@@ -142,7 +147,7 @@ def create_transfer_learning(new_model, custom_model, modify_name, input_channel
     x = new.output
     x = GlobalAveragePooling2D()(x)
     x = Dropout(0.5)(x)
-    x = Dense(1)(x)
+    #x = Dense(1)(x)
     model = Model(new.input, x)
     for layer in new.layers:
@@ -155,7 +160,7 @@ def create_hybrid_transfer(nb_attributes, new_model, custom_model, modify_name, in
     mlp = create_mlp(nb_attributes)
     cnn = create_transfer_learning(new_model, custom_model, modify_name, input_channel)
     combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)
+    x = Dense(4, activation="tanh")(combinedInput)
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model