Commit 486b126f authored by Lafnoune Imane

tl

parent 3ec9d6c9
import logging
import numpy as np
import math
#### Functions to modify!
def evaluate_hybrid(model, df, trainAttrX, trainImagesX, trainY, sc):
    # Predict with the hybrid (tabular + image) model and report the mean
    # absolute percentage error on the unscaled FVC values.
    logging.info("predicting ...")
    preds = model.predict([trainAttrX, trainImagesX])
    # the scaler expects 2D input, so reshape before undoing the scaling
    preds_fvc = sc.inverse_transform(preds.reshape(-1, 1)).flatten()
    true_fvc = sc.inverse_transform(np.asarray(trainY).reshape(-1, 1)).flatten()
    diff = preds_fvc - true_fvc
    percentDiff = (diff / true_fvc) * 100
    absPercentDiff = np.abs(percentDiff)
    mean = np.mean(absPercentDiff)
    std = np.std(absPercentDiff)
    print("avg. FVC: {}, std FVC: {}".format(df["FVC"].mean(), df["FVC"].std()))
    print("mean difference: {:.2f}%, std: {:.2f}%".format(mean, std))
    return preds
def evaluate_cnn(model, df, trainImagesX, trainY, sc):
    # Predict with the image-only CNN and report the mean absolute
    # percentage error on the unscaled FVC values.
    logging.info("predicting ...")
    preds = model.predict(trainImagesX)
    preds_fvc = sc.inverse_transform(preds.reshape(-1, 1)).flatten()
    true_fvc = sc.inverse_transform(np.asarray(trainY).reshape(-1, 1)).flatten()
    diff = preds_fvc - true_fvc
    percentDiff = (diff / true_fvc) * 100
    absPercentDiff = np.abs(percentDiff)
    mean = np.mean(absPercentDiff)
    std = np.std(absPercentDiff)
    print("avg. FVC: {}, std FVC: {}".format(df["FVC"].mean(), df["FVC"].std()))
    print("mean difference: {:.2f}%, std: {:.2f}%".format(mean, std))
    return preds
def evaluate_mlp(model, df, trainAttrX, trainY, sc):
    # Predict with the tabular-only MLP and report the mean absolute
    # percentage error on the unscaled FVC values.
    logging.info("predicting ...")
    preds = model.predict(trainAttrX)
    preds_fvc = sc.inverse_transform(preds.reshape(-1, 1)).flatten()
    true_fvc = sc.inverse_transform(np.asarray(trainY).reshape(-1, 1)).flatten()
    diff = preds_fvc - true_fvc
    percentDiff = (diff / true_fvc) * 100
    absPercentDiff = np.abs(percentDiff)
    mean = np.mean(absPercentDiff)
    std = np.std(absPercentDiff)
    print("avg. FVC: {}, std FVC: {}".format(df["FVC"].mean(), df["FVC"].std()))
    print("mean difference: {:.2f}%, std: {:.2f}%".format(mean, std))
    return preds
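# Usage sketch for the evaluate_* helpers (names assumed for illustration):
# `sc` must be the same scaler that was fitted on the FVC target, and the
# model must already be trained.
#   from sklearn.preprocessing import MinMaxScaler
#   sc = MinMaxScaler()
#   trainY = sc.fit_transform(df[["FVC"]]).flatten()
#   preds = evaluate_mlp(trained_mlp, df, trainAttrX, trainY, sc)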
def compute_score(y_true, y_pred):
    # Modified Laplace log-likelihood (the OSIC competition metric). Here
    # sigma is taken from the raw residuals as a placeholder; the real
    # metric uses a separately predicted confidence value.
    sigma = y_true - y_pred
    fvc_pred = y_pred
    sigma_clip = np.maximum(sigma, 70)                    # floor sigma at 70 mL
    delta = np.minimum(np.abs(y_true - fvc_pred), 1000)   # cap errors at 1000 mL
    sq2 = math.sqrt(2)
    metric = -(delta / sigma_clip) * sq2 - np.log(sigma_clip * sq2)
    return (sigma, np.mean(metric))
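# Quick sanity check of compute_score on synthetic FVC values (mL); the
# numbers are illustrative assumptions. Note that sigma here comes from the
# raw residuals, standing in for a separately predicted confidence.
if __name__ == "__main__":
    y_true = np.array([2800.0, 3100.0, 2500.0])
    y_pred = np.array([2750.0, 3200.0, 2400.0])
    sigma, score = compute_score(y_true, y_pred)
    print("Laplace log-likelihood:", score)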
import numpy as np
from tensorflow import TensorShape
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten,
                                     GlobalAveragePooling2D, Input,
                                     MaxPooling2D, concatenate)
def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
    # initialize the input shape and channel dimension, assuming
    # TensorFlow/channels-last ordering
    inputShape = (height, width, depth)
    chanDim = -1

    # define the model input
    inputs = Input(shape=inputShape)

    # loop over the number of filters
    for (i, f) in enumerate(filters):
        # if this is the first CONV layer then set the input appropriately
        if i == 0:
            x = inputs
        # CONV => RELU => BN => POOL
        x = Conv2D(f, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

    # flatten the volume, then FC => RELU => BN => DROPOUT
    x = Flatten()(x)
    x = Dense(16)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=chanDim)(x)
    x = Dropout(0.5)(x)

    # apply another FC layer, this one to match the number of nodes
    # coming out of the MLP
    x = Dense(4)(x)
    x = Activation("relu")(x)

    # check to see if the regression node should be added
    if regress:
        x = Dense(1, activation="linear")(x)

    # construct and return the CNN
    model = Model(inputs, x)
    return model
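# Example (assumed usage): the CNN branch for 240x240 single-channel slices
# (the shape used by create_hybrid below), with the regression head enabled.
#   cnn = create_cnn(240, 240, 1, regress=True)
#   cnn.summary()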
def create_mlp(dim, regress=True):
    # define our MLP network; note that the `regress` flag is currently
    # unused and the linear regression output is always added
    model = Sequential()
    model.add(Dense(8, input_dim=dim, activation="relu"))
    model.add(Dense(4, activation="relu"))
    model.add(Dense(1, activation="linear"))
    return model
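# Example (assumed attribute count): an MLP over 5 tabular features such as
# age, sex, and smoking status.
#   mlp = create_mlp(5)
#   mlp.compile(loss="mse", optimizer="adam")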
def create_hybrid(nb_attributes, shape=(240, 240, 1)):
    # build the MLP (tabular) and CNN (image) branches
    mlp = create_mlp(nb_attributes)
    cnn = create_cnn(*shape)
    # concatenate the branch outputs and add the regression head
    combinedInput = concatenate([mlp.output, cnn.output])
    x = Dense(4, activation="relu")(combinedInput)
    x = Dense(1, activation="linear")(x)
    model = Model(inputs=[mlp.input, cnn.input], outputs=x)
    return model
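# Call pattern sketch (attribute count and data names are assumptions): the
# hybrid model takes the tabular matrix and the image tensor as a list.
#   hybrid = create_hybrid(5)
#   hybrid.compile(loss="mse", optimizer="adam")
#   hybrid.fit([trainAttrX, trainImagesX], trainY, epochs=10)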
def multify_weights(kernel, out_channels):
    # Average the pretrained kernel over its input-channel axis and tile the
    # result `out_channels` times, producing weights of shape
    # (h, w, out_channels, filters) for the extra input channels.
    mean_1d = np.mean(kernel, axis=-2).reshape(kernel[:, :, -1:, :].shape)
    tiled = np.tile(mean_1d, (out_channels, 1))
    return tiled
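# Shape check (assumed RGB kernel): averaging a (3, 3, 3, 32) kernel over
# the input-channel axis and tiling it once gives a (3, 3, 1, 32) block to
# concatenate onto the original kernel along axis=-2.
#   extra = multify_weights(np.random.rand(3, 3, 3, 32), 1)
#   extra.shape  # -> (3, 3, 1, 32)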
def weightify(model_orig, custom_model, layer_modify, input_channel):
    # Copy weights layer by layer from the original (RGB) model into the
    # custom model; `layer_modify` names the first convolutional layer,
    # whose kernel is expanded to cover the extra input channels.
    layer_to_modify = [layer_modify]
    conf = custom_model.get_config()
    layer_names = [conf['layers'][x]['name'] for x in range(len(conf['layers']))]
    for layer in model_orig.layers:
        if layer.name in layer_names:
            if layer.get_weights() != []:
                target_layer = custom_model.get_layer(layer.name)
                if layer.name in layer_to_modify:
                    kernels = layer.get_weights()[0]
                    # extend the RGB kernel with averaged weights for the
                    # (input_channel - 3) additional channels
                    kernels_extra_channel = np.concatenate(
                        (kernels, multify_weights(kernels, input_channel - 3)),
                        axis=-2)
                    # set the expanded kernel directly on the custom model's
                    # layer; this assumes that layer was built without a bias
                    # term (use_bias=False) for `input_channel` input channels
                    target_layer.set_weights([kernels_extra_channel])
                else:
                    target_layer.set_weights(layer.get_weights())
                    target_layer.trainable = False
    return custom_model
def create_transfer_learning(new_model, custom_model, modify_name, input_channel=4):
    # build the CNN branch from a pretrained backbone via transfer learning
    new = weightify(new_model, custom_model, modify_name, input_channel)
    x = new.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(1)(x)
    model = Model(new.input, x)
    # freeze the backbone layers
    for layer in new.layers:
        layer.trainable = False
    return model
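# Intended call pattern (sketch, names assumed, not runnable as-is): `base`
# would be a Keras application pretrained on RGB inputs, and `custom` the
# same architecture rebuilt with a 4-channel input whose first conv layer
# is named "conv1_conv".
#   cnn_branch = create_transfer_learning(base, custom, "conv1_conv", input_channel=4)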
def create_hybrid_transfer(nb_attributes, new_model, custom_model, modify_name, input_channel):
    # build the MLP branch and the transfer-learning CNN branch
    mlp = create_mlp(nb_attributes)
    cnn = create_transfer_learning(new_model, custom_model, modify_name, input_channel)
    combinedInput = concatenate([mlp.output, cnn.output])
    x = Dense(4, activation="relu")(combinedInput)
    x = Dense(1, activation="linear")(x)
    model = Model(inputs=[mlp.input, cnn.input], outputs=x)
    return model
def create_regression(training_df_X, training_df_y, test_df_X, test_df_y, model):
    # fit a scikit-learn style regressor and report R^2 (for regressors,
    # model.score returns the coefficient of determination, not accuracy)
    model.fit(training_df_X, training_df_y)
    print('Training R^2:', model.score(training_df_X, training_df_y))
    print('Test R^2:', model.score(test_df_X, test_df_y))
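# Smoke test for create_regression; LinearRegression and the synthetic data
# below are illustrative assumptions, not the project's pipeline.
if __name__ == "__main__":
    from sklearn.linear_model import LinearRegression
    from sklearn.model_selection import train_test_split

    X = np.random.rand(100, 4)
    y = X @ np.array([1.0, -2.0, 0.5, 3.0]) + np.random.normal(0, 0.1, 100)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    create_regression(X_tr, y_tr, X_te, y_te, LinearRegression())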