Bannier Delphine / Projet_FR

Commit c0a364d5 authored Jun 02, 2021 by Billy Amélie

Change model structures and rerun CNN_inj_sup_4chann

Parent: 375c43d6
Changes: 3 files
clean_notebooks/CNN_injection_superposition_4Chann.ipynb
This diff is collapsed.
clean_notebooks/superposition_injection.h5
No preview for this file type (binary file).
processing/models.py
@@ -17,7 +17,7 @@ from tensorflow import TensorShape
 from sklearn.model_selection import train_test_split

-def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
+def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False, type_cnn='simple'):
     # initialize the input shape and channel dimension, assuming
     # TensorFlow/channels-last ordering
     inputShape = (height, width, depth)
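The new keyword defaults to 'simple', so existing call sites keep their old behavior; a minimal illustration (the calls below are hypothetical, not part of the commit):

# Both calls build the same standalone CNN as before this commit:
cnn_a = create_cnn(240, 240, 1)
cnn_b = create_cnn(240, 240, 1, type_cnn='simple')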
@@ -41,18 +41,19 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
     # flatten the volume, then FC => RELU => BN => DROPOUT
     x = Flatten()(x)
     x = Dense(16)(x)
-    x = Activation("relu")(x)  # sigmoid or tanh
+    x = Activation("tanh")(x)  # sigmoid or tanh
     x = BatchNormalization(axis=chanDim)(x)
     x = Dropout(0.5)(x)
     # apply another FC layer, this one to match the number of nodes
     # coming out of the MLP
     x = Dense(4)(x)
-    x = Activation("relu")(x)  # sigmoid or tanh
+    x = Activation("tanh")(x)  # sigmoid or tanh
     # check to see if the regression node should be added
-    if regress:
-        x = Dense(1, activation="linear")(x)
+    # the next layer is useless for the hybrid; keep it for the standalone CNN model
+    if type_cnn == 'simple':
+        if regress:
+            x = Dense(1, activation="linear")(x)
     # construct the CNN
     model = Model(inputs, x)
     # return the CNN
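For context, a minimal sketch of what the new type_cnn flag changes (illustrative calls and shapes, not part of the commit):

# With type_cnn='hybrid' the linear regression head is skipped, so the CNN
# ends at the 4-unit tanh layer and can be fused with the MLP branch.
cnn_alone  = create_cnn(240, 240, 1, regress=True)                     # ends in Dense(1, "linear")
cnn_branch = create_cnn(240, 240, 1, regress=True, type_cnn='hybrid')  # ends in 4-unit tanh features
print(cnn_alone.output_shape)   # (None, 1)
print(cnn_branch.output_shape)  # (None, 4)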
@@ -61,28 +62,32 @@ def create_cnn(width, height, depth, filters=(32, 64, 128), regress=False):
 def create_mlp(dim, regress=True):
     # define our MLP network
     model = Sequential()
-    model.add(Dense(8, input_dim=dim, activation="relu"))  # tanh
-    model.add(Dense(4, activation="relu"))  # tanh
+    model.add(Dense(8, input_dim=dim, activation="tanh"))  # tanh
+    model.add(Dropout(0.3))
+    model.add(Dense(4, activation="tanh"))  # tanh
     # add dense for regression
-    model.add(Dense(1, activation="linear"))  # tanh
+    # model.add(Dense(1, activation="linear"))  # tanh
     # return our model
     return model

 def create_mlp2(dim, regress=True):  # better than mlp
     model = Sequential()
     model.add(GaussianNoise(0.2, input_shape=(dim,)))
-    model.add(Dense(8, activation="relu"))
-    model.add(Dense(4, activation="relu"))
+    model.add(Dropout(0.5))
+    # modified from relu to tanh
+    model.add(Dense(8, activation="tanh"))
+    model.add(Dropout(0.5))
+    model.add(Dense(4, activation="tanh"))
     # add dense for regression
-    model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not down to 1; same for the mlp in the hybrid
+    # model.add(Dense(1))  # layer to remove: too many layers in this model, and above all not down to 1; same for the mlp in the hybrid
     return model

 def create_hybrid(nb_attributes, shape=(240, 240, 1)):
     # create cnn and mlp models
     mlp = create_mlp(nb_attributes)
-    cnn = create_cnn(*shape)
+    cnn = create_cnn(*shape, type_cnn='hybrid')
     combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)  # tanh
+    x = Dense(4, activation="tanh")(combinedInput)  # tanh
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model
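A hedged usage sketch for the updated create_hybrid; the attribute count, 4-channel image shape (suggested only by the notebook name), and data variable names are all assumptions:

# Hypothetical call: input order matches Model(inputs=[mlp.input, cnn.input]) above.
# X_attributes, X_images, and y are placeholder arrays, not from the commit.
model = create_hybrid(nb_attributes=5, shape=(240, 240, 4))
model.compile(loss="mean_squared_error", optimizer="adam")
model.fit([X_attributes, X_images], y, epochs=10, batch_size=8)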
@@ -142,7 +147,7 @@ def create_transfer_learning(new_model, custom_model, modify_name,input_channel
     x = new.output
     x = GlobalAveragePooling2D()(x)
     x = Dropout(0.5)(x)
-    x = Dense(1)(x)
+    # x = Dense(1)(x)
     model = Model(new.input, x)
     for layer in new.layers:
@@ -155,7 +160,7 @@ def create_hybrid_transfer(nb_attributes,new_model, custom_model, modify_name,in
     mlp = create_mlp(nb_attributes)
     cnn = create_transfer_learning(new_model, custom_model, modify_name, input_channel)
     combinedInput = concatenate([mlp.output, cnn.output])
-    x = Dense(4, activation="relu")(combinedInput)
+    x = Dense(4, activation="tanh")(combinedInput)
     x = Dense(1, activation="linear")(x)
     model = Model(inputs=[mlp.input, cnn.input], outputs=x)
     return model
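Taken together, the commit makes every branch builder stop at a small feature vector so the fusion head performs the regression; a sketch of the fused dimensions (illustrative, derived from the layer sizes above):

nb_attributes = 5                                 # assumed tabular feature count
mlp = create_mlp(nb_attributes)                   # ends at Dense(4, "tanh") -> 4 features
cnn = create_cnn(240, 240, 1, type_cnn='hybrid')  # ends at Dense(4) + tanh  -> 4 features
fused = concatenate([mlp.output, cnn.output])     # tensor of 8 fused features
# create_hybrid then applies Dense(4, "tanh") and Dense(1, "linear") to this vector.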