Commit 0e8205dc authored by Billy Amélie
parents e276b30e 3f0a09d4
from keras.models import Model, Sequential
from keras import backend as K

def create_dropout_predict_function(model, dropout):
    """
    Create a Keras function to predict with dropout enabled.

    model : keras model
    dropout : fraction of units to drop in every dropout-capable layer

    Returns
    predict_with_dropout : Keras function for predicting with dropout;
        call it as predict_with_dropout([inputs, 1]) (learning phase = 1)
    """
    # Load the config of the original model
    conf = model.get_config()
    # Add the specified dropout to all layers
    for layer in conf['layers']:
        # Dropout layers
        if layer["class_name"] == "Dropout":
            layer["config"]["rate"] = dropout
        # Recurrent layers with dropout
        elif "dropout" in layer["config"].keys():
            layer["config"]["dropout"] = dropout
    # Create a new model with the specified dropout
    if isinstance(model, Sequential):
        # Sequential
        model_dropout = Sequential.from_config(conf)
    else:
        # Functional
        model_dropout = Model.from_config(conf)
    model_dropout.set_weights(model.get_weights())
    # Build a backend function whose last input is the learning phase,
    # so dropout stays active at prediction time when it is set to 1
    predict_with_dropout = K.function(model_dropout.inputs + [K.learning_phase()],
                                      model_dropout.outputs)
    return predict_with_dropout
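A minimal usage sketch (not part of this commit): calling the returned function with the learning phase set to 1 keeps dropout active, so repeated forward passes give a spread of predictions in the spirit of Monte Carlo dropout. trained_model and X are placeholder names.

import numpy as np

# Assumption: trained_model is a fitted Keras model, X a prepared input batch
predict_with_dropout = create_dropout_predict_function(trained_model, dropout=0.5)

n_samples = 20
# each call applies a fresh dropout mask because the learning phase is 1
preds = np.stack([predict_with_dropout([X, 1])[0] for _ in range(n_samples)])

mean_pred = preds.mean(axis=0)  # point estimate
sigma = preds.std(axis=0)       # spread, usable as the sigma argument of compute_score below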
@@ -8,8 +8,8 @@ import math
 def evaluate_hybrid(model, df, trainAttrX, trainImagesX, trainY, sc):
     logging.info("predicting ...")
     preds = model.predict([trainAttrX, trainImagesX])
-    diff = sc.inverse_transform(preds.flatten()) - sc.inverse_transform(trainY)
-    percentDiff = (diff / sc.inverse_transform(trainY)) * 100
+    diff = preds.flatten() - trainY
+    percentDiff = (diff / trainY) * 100
     absPercentDiff = np.abs(percentDiff)
     mean = np.mean(absPercentDiff)
     std = np.std(absPercentDiff)
@@ -41,11 +41,10 @@ def evaluate_mlp(model, df, trainAttrX, trainY, sc):
     print("mean difference : {:.2f}%, std: {:.2f}%".format(mean, std))
     return preds

-def compute_score(y_true, y_pred):
-    sigma = (y_true - y_pred)
+def compute_score(y_true, y_pred, sigma):
     fvc_pred = y_pred
     sigma_clip = np.maximum(sigma, 70)
     delta = np.minimum(abs(y_true - fvc_pred), 1000)
     sq2 = math.sqrt(2)
     metric = -(delta / sigma_clip) * sq2 - np.log(sigma_clip * sq2)
-    return (sigma, np.mean(metric))
+    return np.mean(metric)
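This appears to be the Laplace log likelihood used to score FVC forecasts in the OSIC Pulmonary Fibrosis Progression challenge: metric = -sqrt(2)*delta/sigma_clip - ln(sqrt(2)*sigma_clip), with sigma clipped below at 70 and the absolute error delta capped at 1000. A quick check of the new signature on made-up numbers:

import numpy as np

y_true = np.array([2500.0, 2300.0])  # observed FVC
y_pred = np.array([2450.0, 2600.0])  # predicted FVC
sigma = np.array([100.0, 50.0])      # predicted uncertainty; 50 gets clipped to 70

score = compute_score(y_true, y_pred, sigma)
# delta = [50, 300], sigma_clip = [100, 70]
# per-sample metric ≈ [-5.66, -10.66], so score ≈ -8.16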
@@ -17,7 +17,7 @@ from tensorflow import TensorShape
 from sklearn.model_selection import train_test_split

-def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
+def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False, type_cnn='simple'):
     # initialize the input shape and channel dimension, assuming
     # TensorFlow/channels-last ordering
     inputShape = (height, width, depth)
@@ -41,18 +41,20 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
     # flatten the volume, then FC => RELU => BN => DROPOUT
     x = Flatten()(x)
     x = Dense(16)(x)
-    x = Activation("relu")(x)  # sigmoid or tanh
+    x = Activation("tanh")(x)  # sigmoid or tanh
     x = BatchNormalization(axis=chanDim)(x)
     x = Dropout(0.5)(x)
     # apply another FC layer, this one to match the number of nodes
     # coming out of the MLP
     x = Dense(4)(x)
-    x = Activation("relu")(x)  # sigmoid or tanh
+    x = Activation("tanh")(x)  # sigmoid or tanh
     # check to see if the regression node should be added
-    if regress:
-        x = Dense(1, activation="linear")(x)
+    # the next layer is useless for the hybrid model; keep it for the standalone CNN model
+    if type_cnn == 'simple':
+        if regress:
+            x = Dropout(0.5)(x)
+            x = Dense(1, activation="linear")(x)
     # construct the CNN
     model = Model(inputs, x)
     # return the CNN
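Illustrative calls for the new type_cnn switch (shapes are placeholders): the regression head is only appended for the standalone CNN, while the hybrid variant stops at the 4-unit tanh layer so its output can be concatenated with the MLP branch.

# standalone CNN: keeps the Dropout + Dense(1, activation="linear") head
cnn_alone = create_cnn(240, 240, 1, regress=True, type_cnn='simple')

# CNN branch for the hybrid model: no regression head
cnn_branch = create_cnn(240, 240, 1, type_cnn='hybrid')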
@@ -61,28 +63,33 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):

 def create_mlp(dim, regress=True):
     # define our MLP network
     model = Sequential()
-    model.add(Dense(8, input_dim=dim, activation="relu"))  # tanh
-    model.add(Dense(4, activation="relu"))  # tanh
+    model.add(Dense(8, input_dim=dim, activation="tanh"))
+    model.add(Dropout(0.3))
+    model.add(Dense(4, activation="tanh"))
     # add dense for regression
-    model.add(Dense(1, activation="linear"))
+    #model.add(Dense(1, activation="linear"))
     # return our model
     return model

 def create_mlp2(dim, regress=True):  # better than create_mlp
     model = Sequential()
     model.add(GaussianNoise(0.2, input_shape=(dim,)))
-    model.add(Dense(8, activation="relu"))
-    model.add(Dense(4, activation="relu"))
-    model.add(Dropout(0.5))
+    # modified from relu to tanh
+    model.add(Dense(8, activation="tanh"))
+    model.add(Dropout(0.5))
+    model.add(Dense(4, activation="tanh"))
     # add dense for regression
-    model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not width 1; same for the MLP used in the hybrid
+    #model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not width 1; same for the MLP used in the hybrid
     return model

 def create_hybrid(nb_attributes, shape=(240, 240, 1)):
     # create cnn and mlp models
     mlp = create_mlp(nb_attributes)
-    cnn = create_cnn(*shape)
+    cnn = create_cnn(*shape, type_cnn='hybrid')
     combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)  # tanh
+    x = Dense(4, activation="tanh")(combinedInput)
+    x = Dropout(0.5)(x)
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model
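A minimal end-to-end sketch of the hybrid model under assumed shapes (tabular attributes plus 240x240x1 images); the loss and optimizer are not part of this diff and are only one reasonable choice.

import numpy as np
from keras.optimizers import Adam

nb_attributes = 7  # assumed number of tabular features
model = create_hybrid(nb_attributes)
model.compile(loss="mse", optimizer=Adam(lr=1e-3))  # assumed, not from this commit

# dummy data with the expected input structure: [attributes, images]
attrX = np.random.rand(32, nb_attributes)
imagesX = np.random.rand(32, 240, 240, 1)
y = np.random.rand(32)
model.fit([attrX, imagesX], y, epochs=1, batch_size=8)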
@@ -142,7 +149,7 @@ def create_transfer_learning(new_model, custom_model, modify_name, input_channel
     x = new.output
     x = GlobalAveragePooling2D()(x)
     x = Dropout(0.5)(x)
-    x = Dense(1)(x)
+    #x = Dense(1)(x)
     model = Model(new.input, x)

     for layer in new.layers:
@@ -155,7 +162,8 @@ def create_hybrid_transfer(nb_attributes, new_model, custom_model, modify_name, in
     mlp = create_mlp(nb_attributes)
     cnn = create_transfer_learning(new_model, custom_model, modify_name, input_channel)
     combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)
+    x = Dense(4, activation="tanh")(combinedInput)
+    x = Dropout(0.5)(x)
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model