Commit 332d7088 authored by Billy Amélie's avatar Billy Amélie
Browse files

add function to models

parent aedf6247
......@@ -8,7 +8,11 @@ from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import concatenate
from tensorflow import TensorShape
import numpy as np
def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
......@@ -70,6 +74,79 @@ def create_hybrid(nb_attributes,shape=(240,240,1)):
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
return model
def multify_weights(kernel, out_channels):
    """Build weights for extra input channels by averaging the existing ones.

    Averages *kernel* over its input-channel axis (axis -2) and repeats the
    mean slice ``out_channels`` times along that same axis, yielding weights
    for the additional input channels of a widened first conv layer.
    Assumes *kernel* is a 4-D conv kernel shaped
    (height, width, in_channels, filters) — TODO confirm with callers.
    """
    # keepdims keeps the singleton input-channel axis, so the mean slice
    # has shape (h, w, 1, filters) and can be tiled in place.
    channel_mean = np.mean(kernel, axis=-2, keepdims=True)
    # Replicate the mean only along the input-channel axis.
    return np.tile(channel_mean, (1, 1, out_channels, 1))
def weightify(model_orig, custom_model, layer_modify, input_channel):
    """Copy weights from ``model_orig`` into ``custom_model``, expanding the
    first conv layer's kernel to accept ``input_channel`` input channels.

    Parameters
    ----------
    model_orig : keras.Model
        Pretrained source model (3-channel input).
    custom_model : keras.Model
        Target model whose architecture matches ``model_orig`` except that
        the layer named ``layer_modify`` takes ``input_channel`` channels.
    layer_modify : str
        Name of the first convolutional layer to widen.
    input_channel : int
        New number of input channels (extra channels = input_channel - 3).

    Returns
    -------
    keras.Model
        ``custom_model`` with weights copied in.

    NOTE(review): assumes the widened conv layer in ``custom_model`` was
    built without a bias term (use_bias=False), since only the kernel is
    copied — confirm against the model definition.
    """
    layers_to_modify = [layer_modify]
    conf = custom_model.get_config()
    layer_names = [layer_conf['name'] for layer_conf in conf['layers']]

    for layer in model_orig.layers:
        if layer.name not in layer_names:
            continue
        if layer.get_weights() == []:
            # Weight-less layers (pooling, activation, ...) — nothing to copy.
            continue
        target_layer = custom_model.get_layer(layer.name)
        if layer.name in layers_to_modify:
            kernels = layer.get_weights()[0]
            # Extend the kernel along the input-channel axis with mean-based
            # weights for the extra channels.
            kernels_extra_channel = np.concatenate(
                (kernels, multify_weights(kernels, input_channel - 3)),
                axis=-2)
            # BUG FIX: the original code rebound ``target_layer`` to a brand
            # new Conv2D that was never attached to ``custom_model``, so the
            # expanded kernel was silently discarded. Set the weights on the
            # custom model's own layer instead.
            target_layer.set_weights([kernels_extra_channel])
        else:
            target_layer.set_weights(layer.get_weights())
            target_layer.trainable = False
    return custom_model
def create_transfer_learning(new_model, custom_model, modify_name, input_channel=4):
    """Build a CNN regressor from a pretrained backbone via transfer learning.

    Copies weights from ``new_model`` into ``custom_model`` (widening the
    first conv layer named ``modify_name`` to ``input_channel`` channels),
    freezes the whole backbone, and attaches a small regression head
    (global average pooling -> dropout -> Dense(1)).
    """
    base = weightify(new_model, custom_model, modify_name, input_channel)
    head = GlobalAveragePooling2D()(base.output)
    head = Dropout(0.5)(head)
    head = Dense(1)(head)
    transfer_model = Model(base.input, head)
    # Freeze the copied backbone; only the newly added head remains
    # trainable (head layers are not in base.layers).
    for frozen in base.layers:
        frozen.trainable = False
    return transfer_model
def create_hybrid_transfer(nb_attributes, new_model, custom_model, modify_name, input_channel):
    """Combine an attribute MLP with a transfer-learning CNN into one model.

    The MLP branch consumes ``nb_attributes`` tabular features; the CNN
    branch is built by ``create_transfer_learning``. Their outputs are
    concatenated and fed through a small dense head ending in a linear
    unit for regression.
    """
    mlp_branch = create_mlp(nb_attributes)
    cnn_branch = create_transfer_learning(new_model, custom_model, modify_name, input_channel)
    merged = concatenate([mlp_branch.output, cnn_branch.output])
    head = Dense(4, activation="relu")(merged)
    head = Dense(1, activation="linear")(head)
    return Model(inputs=[mlp_branch.input, cnn_branch.input], outputs=head)
def create_regression(training_df_X, training_df_y, test_df_X, test_df_y, model):
    """Fit *model* on the training split and report its scores.

    Parameters
    ----------
    training_df_X, training_df_y : array-like
        Training features and targets.
    test_df_X, test_df_y : array-like
        Held-out features and targets.
    model : estimator
        Any object with sklearn-style ``fit(X, y)`` and ``score(X, y)``.

    Returns
    -------
    The fitted estimator.

    NOTE(review): ``score`` is R^2 for sklearn regressors, so "accuracy"
    in the printed label is loose — kept for output compatibility.
    """
    model.fit(training_df_X, training_df_y)
    print('Training accuracy :', model.score(training_df_X, training_df_y))
    # BUG FIX: the test split was accepted but never used, and the fitted
    # model was not returned.
    print('Test accuracy :', model.score(test_df_X, test_df_y))
    return model
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment