Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
Ulmer Louis
ASI4_LouisULMER
Commits
98d649be
Commit
98d649be
authored
Jan 09, 2019
by
Ulmer Louis
Browse files
projet iml
parent
2ea462d9
Changes
9
Hide whitespace changes
Inline
Side-by-side
.DS_Store
View file @
98d649be
No preview for this file type
IML/.DS_Store
View file @
98d649be
No preview for this file type
IML/TP4-optim_sans_contraintes/.DS_Store
View file @
98d649be
No preview for this file type
IML/TP4-optim_sans_contraintes/TP4_IML.m~
deleted
100644 → 0
View file @
2ea462d9
%% TD OPTIM SANS CONTRAINTE (unconstrained optimisation lab)
%% Q1
%% Q2: write a function d = mongradient(theta)
theta=[1 1]'
d=mongradientRosen(theta)
%% Q3: write a function cout = moncritere(theta)
cout=moncritereRosen(theta)
%% Q4: fixed-step gradient descent, based on the TpOptim2.m script
%% TpOptim2.m
clear all
close all
clc
%% contour lines of the criterion J
n = 100;
[X, Y] = meshgrid(linspace(-1.25, 2.5, n), linspace(-1.75, 2, n));
ptx = reshape(X, n*n,1);
pty = reshape(Y, n*n,1);
pt = [ptx pty];
% Define the function J = f(\theta) (Rosenbrock function)
Jmat = (1-pt(:,1)).^2 + 100*(pt(:, 2) - pt(:, 1).^2).^2;
%exp(-0.1)*(exp(pt*a) + exp(pt*b) + exp(pt*c));
% Create the surface plot using contour command
figure(1);
contour(X, Y, reshape(Jmat, n, n), [40:-5:0 1 0], 'linewidth', 1.5);
colorbar
axis tight
set(gca, 'fontsize', 18)
% uncomment if you use a fixed step at each iteration
% pas = 0.1;
% initial solution
theta0 = [-1; 0];
% plot theta0
figure(1), hold on
h = plot(theta0(1,:), theta0(2,:), 'ro');
set(h, 'MarkerSize', 8, 'markerfacecolor', 'r');
text(theta0(1,1), theta0(2,1)-0.175, '\theta_0', 'fontsize', 16)
theta= [-1; 0];
k=1;
kmax=15;
criteres=[];
tic;
% BUGFIX: was "|| (k<kmax)", which only stops once BOTH the gradient is
% small AND k>=kmax, so kmax never capped the iteration count. Use &&,
% consistent with the Newton and Armijo loops below.
while ((norm(mongradientRosen(theta))>10^-3) && (k<kmax)) % not converged
    % renamed from X to avoid clobbering the meshgrid matrix X above
    msg = sprintf('iter num %d',k);
    disp(msg)
    % current criterion value J(theta)
    crit=moncritereRosen(theta);
    % descent direction: minus the gradient
    direction = -mongradientRosen(theta);
    % fixed step size
    pas = 1e-3;
    % update theta = theta + pas * direction
    theta = theta + pas*direction;
    k=k+1;
    % plot the current theta on the contour figure
    figure(1), hold on
    h = plot(theta(1,:), theta(2,:), 'ro');
    set(h, 'MarkerSize', 3, 'markerfacecolor', 'r');
    drawnow
    criteres=[criteres crit];
    % criterion history
    figure(2),hold on
    plot(criteres)
    drawnow
end
temps_gradient_pas_fixe=toc
%%
clear all
close all
clc
%% Regularized Newton method
n = 100;
[X, Y] = meshgrid(linspace(-1.25, 2.5, n), linspace(-1.75, 2, n));
ptx = reshape(X, n*n,1);
pty = reshape(Y, n*n,1);
pt = [ptx pty];
% Define the function J = f(\theta) (Rosenbrock)
Jmat = (1-pt(:,1)).^2 + 100*(pt(:, 2) - pt(:, 1).^2).^2;
%exp(-0.1)*(exp(pt*a) + exp(pt*b) + exp(pt*c));
% Create the surface plot using contour command
figure(1);
contour(X, Y, reshape(Jmat, n, n), [40:-5:0 1 0], 'linewidth', 1.5);
colorbar
axis tight
set(gca, 'fontsize', 18)
% To complete ...
% uncomment if you use a fixed step at each iteration
% pas = 0.1;
% initial solution
theta0 = [-1; 0];
% plot theta0
figure(1), hold on
h = plot(theta0(1,:), theta0(2,:), 'ro');
set(h, 'MarkerSize', 8, 'markerfacecolor', 'r');
text(theta0(1,1), theta0(2,1)-0.175, '\theta_0', 'fontsize', 16)
k=1;
kmax=15;
criteres=[];
theta= [-1; 0];
tic;
% stop at convergence (small gradient norm) or after kmax iterations
while ((norm(mongradientRosen(theta))>10^-3) && (k<kmax)) % not converged
% iteration counter (NOTE(review): this clobbers the meshgrid matrix X)
X = sprintf('iter num %d',k);
disp(X)
% gradient and Hessian at the current point
grad=mongradientRosen(theta);
H=monhessienRosen(theta);
% current criterion value J(theta)
crit=moncritereRosen(theta);
% ensure H is positive definite: chol's second output is nonzero when the
% factorization fails, i.e. H is not positive definite
[L,test]=chol(H,'lower');
if test
% regularize: shift H by half its Frobenius norm times the identity
tau=norm(H,'fro');
H=H+tau/2*eye(2);
end
% descent direction: (regularized) Newton step -H^{-1}*grad
direction=-H\grad;
% fixed step size applied to the Newton direction
pas = 0.01;
% update theta = theta + pas * direction
theta = theta + pas*direction;
k=k+1;
% plot the current theta on the contour figure
figure(1), hold on
%subplot(2,1,1)
h = plot(theta(1,:), theta(2,:), 'ro');
set(h, 'MarkerSize', 3, 'markerfacecolor', 'r');
%drawnow
criteres=[criteres crit];
%subplot(2,1,2),
% criterion history
figure(2),hold on
plot(criteres)
%drawnow
end
temps_newton_regularisee=toc
% figure(2), hold on
% plot(crit)
% drawnow
%% Variable-step gradient descent (Armijo-like step adaptation):
% contour lines of the criterion J
n = 100;
[X, Y] = meshgrid(linspace(-1.25, 2.5, n), linspace(-1.75, 2, n));
ptx = reshape(X, n*n,1);
pty = reshape(Y, n*n,1);
pt = [ptx pty];
% Define the function J = f(\theta) (Rosenbrock)
Jmat = (1-pt(:,1)).^2 + 100*(pt(:, 2) - pt(:, 1).^2).^2;
%exp(-0.1)*(exp(pt*a) + exp(pt*b) + exp(pt*c));
% Create the surface plot using contour command
figure(1);
contour(X, Y, reshape(Jmat, n, n), [40:-5:0 1 0], 'linewidth', 1.5);
colorbar
axis tight
set(gca, 'fontsize', 18)
% initial solution
theta0 = [-1; 0];
% plot theta0
figure(1), hold on
h = plot(theta0(1,:), theta0(2,:), 'ro');
set(h, 'MarkerSize', 8, 'markerfacecolor', 'r');
text(theta0(1,1), theta0(2,1)-0.175, '\theta_0', 'fontsize', 16)
tic;
% BUGFIX: was "theta = theta;", a no-op that silently reused the previous
% section's final theta; start from the plotted initial point instead.
theta= theta0;
k=1;
kmax=100;
alpha=0.15;   % step growth factor on success
beta=2;       % step reduction factor on failure
JIter=[];
g=mongradientRosen(theta);
J_new=moncritereRosen(theta);
J_old=J_new+1; % force the first iteration into the "accept" branch
pas=10^(-3);
while ((norm(mongradientRosen(theta))>10^-3) && (k<kmax)) % not converged
    msg = sprintf('iter num %d',k);
    disp(msg)
    % current criterion value J(theta)
    J_new=moncritereRosen(theta);
    JIter=[JIter; J_new];
    if (J_new < J_old)
        % criterion decreased: accept the step and grow the step size
        pas=(1+alpha)*pas;
        J_old=J_new;
    else
        % criterion increased: undo the last step (g still holds the
        % gradient used for that step, pas is unchanged since then)
        direction = -g;
        theta=theta-pas*direction; % step back
        pas=pas/beta; % shrink the step size
    end
    % descent direction: minus the gradient
    g=mongradientRosen(theta);
    direction = -g;
    % update theta = theta + pas * direction (unsuppressed: echoes theta)
    theta = theta + pas*direction
    k=k+1;
end
temps_gradient_armijo=toc
figure(2),hold on
hold on
plot(1:length(JIter), JIter, 'o-');
grid on
title('Cout a chaque iteration (pas variable)');
hold off
%% Fixed-step gradient, function version
epsilon=10^(-3); % given in the assignment
nbIterMax=10000;
pas=10^(-3);
tic;
[JIter, theta]=gradient_pas_fixe(theta0, pas, epsilon, nbIterMax)
temps_gradient_pas_fixe=toc
theta_th=[1;1]; % known minimizer of the Rosenbrock function
erreur_gradient_pas_fixe=(norm(theta-theta_th))
figure
hold on
plot(1:length(JIter), JIter, 'o-');
grid on
% BUGFIX: the title wrongly said "pas variable" for the fixed-step run
title('Cout a chaque iteration (pas fixe)');
hold off
%% Variable-step gradient, function version
epsilon=10^(-3); % given in the assignment
nbIterMax=10000;
pas=10^(-3);
alpha=0.15;
beta=2;
tic;
[JIter, theta]=gradient_pas_variable(theta0, pas, epsilon, nbIterMax, alpha, beta)
temps_gradient_pas_variable=toc
theta_th=[1;1];
erreur_gradient_pas_variable=(norm(theta-theta_th))
figure
hold on
plot(1:length(JIter), JIter, 'o-');
grid on
title('Cout a chaque iteration (pas variable)');
hold off
%% Regularized Newton, function version
epsilon=10^(-3); % given in the assignment
nbIterMax=10000;
pas=10^(-3);
tic;
[JIter, theta]=newton_reg(theta0, pas, epsilon, nbIterMax)
temps_newton_regularisee=toc
erreur_newton_reg=(norm(theta-theta_th))
figure
hold on
plot(1:length(JIter), JIter, 'ro-');
grid on
title('Cout a chaque iteration (newton regularise)');
hold off
%% Compare the final errors of the three methods
% BUGFIX: bar() does not accept a cell array of strings as x values (it
% errors); plot the numeric errors and label the ticks instead.
figure
erreurs = [erreur_gradient_pas_fixe erreur_gradient_pas_variable erreur_newton_reg];
bar(erreurs)
set(gca,'XTickLabel',{'grad pas fixe','grad pas variable','newton reg'})
IML/TP4-optim_sans_contraintes/TP4_IML_new.m~
deleted
100644 → 0
View file @
2ea462d9
%% TP4 : Unconstrained optimisation (lab skeleton)
% NOTE(review): this is the starting template handed out for the lab; it is
% not runnable as-is (the loop below is pseudocode). The completed version
% is TP4_IML.m.
clear all
close all
clc
%% contour lines of the criterion J
n = 100;
[X, Y] = meshgrid(linspace(-1.25, 2.5, n), linspace(-1.75, 2, n));
ptx = reshape(X, n*n,1);
pty = reshape(Y, n*n,1);
pt = [ptx pty];
% Define the function J = f(\theta) (Rosenbrock)
Jmat = (1-pt(:,1)).^2 + 100*(pt(:, 2) - pt(:, 1).^2).^2;
%exp(-0.1)*(exp(pt*a) + exp(pt*b) + exp(pt*c));
% Create the surface plot using contour command
figure(1);
contour(X, Y, reshape(Jmat, n, n), [40:-5:0 1 0], 'linewidth', 1.5);
colorbar
axis tight
set(gca, 'fontsize', 18)
% To complete ...
% uncomment if you use a fixed step at each iteration
% pas = 0.1;
% initial solution
theta0 = [-1; 0];
% plot theta0
figure(1), hold on
h = plot(theta0(1,:), theta0(2,:), 'ro');
set(h, 'MarkerSize', 8, 'markerfacecolor', 'r');
text(theta0(1,1), theta0(2,1)-0.175, '\theta_0', 'fontsize', 16)
% NOTE(review): 'non convergence' is a placeholder condition, not valid
% MATLAB, and theta is never initialized before the update below.
while non convergence
% compute J (NOTE(review): despite the label, this actually calls the
% gradient, not the criterion)
J=mongradientRosen(theta0);
% descent direction
direction = -J
% step size
pas = 1
% update theta = theta + pas * direction
theta = theta + pas*direction;
% plot the current theta
end
% plot the criterion versus iterations
\ No newline at end of file
IML/TP4-optim_sans_contraintes/gradient_pas_fixe.m~
deleted
100644 → 0
View file @
2ea462d9
function [JIter, theta]=gradient_pas_fixe(theta0, pas, epsilon, kmax)
% GRADIENT_PAS_FIXE  Fixed-step gradient descent on the Rosenbrock criterion.
%   theta0  : initial point (column vector)
%   pas     : fixed step size
%   epsilon : convergence threshold on the gradient norm
%   kmax    : maximum number of iterations
% Returns:
%   JIter : criterion value recorded at each iteration (column vector)
%   theta : final iterate
% Relies on mongradientRosen / moncritereRosen being on the path.
%
% BUGFIXES vs the previous version: the epsilon and kmax arguments were
% ignored (kmax was overwritten with 15 and the threshold hard-coded to
% 10^-3); the loop used ||, which ignores the iteration cap until
% convergence; a stray tic (with no toc) reset the caller's timer.
theta= theta0;
k=1;
JIter=[];
while ((norm(mongradientRosen(theta))>epsilon) && (k<kmax)) % not converged
    msg = sprintf('iter num %d',k);
    disp(msg)
    % record the current criterion value J(theta)
    crit=moncritereRosen(theta);
    JIter=[JIter;crit];
    % descent direction: minus the gradient
    direction = -mongradientRosen(theta);
    % update theta = theta + pas * direction
    theta = theta + pas*direction;
    k=k+1;
end
end
IML/TP4-optim_sans_contraintes/newton_reg.m~
deleted
100644 → 0
View file @
2ea462d9
function [JIter, theta]=newton_reg(theta0, pas, epsilon, nbIterMax, alpha, beta)
% NEWTON_REG  Regularized Newton descent on the Rosenbrock criterion.
%   theta0      : initial point (2x1 column vector)
%   pas         : step size applied to the Newton direction
%   epsilon     : convergence threshold on the gradient norm
%   nbIterMax   : maximum number of iterations
%   alpha, beta : unused; kept so the signature stays call-compatible
% Returns:
%   JIter : criterion value recorded at each iteration
%   theta : final iterate
% Relies on mongradientRosen / monhessienRosen / moncritereRosen.
%
% BUGFIXES vs the previous version:
%   - function was named newton_re, matching neither the file name nor the
%     call site (newton_reg);
%   - theta0, pas, epsilon and nbIterMax were ignored (overwritten by
%     hard-coded values);
%   - the JIter output was never assigned (values went into "criteres"),
%     so returning from the function raised an error.
% contour lines of the criterion J
n = 100;
[X, Y] = meshgrid(linspace(-1.25, 2.5, n), linspace(-1.75, 2, n));
ptx = reshape(X, n*n,1);
pty = reshape(Y, n*n,1);
pt = [ptx pty];
% Define the function J = f(\theta) (Rosenbrock)
Jmat = (1-pt(:,1)).^2 + 100*(pt(:, 2) - pt(:, 1).^2).^2;
% Create the surface plot using contour command
figure(1);
contour(X, Y, reshape(Jmat, n, n), [40:-5:0 1 0], 'linewidth', 1.5);
colorbar
axis tight
set(gca, 'fontsize', 18)
% plot the initial point
figure(1), hold on
h = plot(theta0(1,:), theta0(2,:), 'ro');
set(h, 'MarkerSize', 8, 'markerfacecolor', 'r');
text(theta0(1,1), theta0(2,1)-0.175, '\theta_0', 'fontsize', 16)
k=1;
JIter=[];
theta= theta0;
while ((norm(mongradientRosen(theta))>epsilon) && (k<nbIterMax)) % not converged
    msg = sprintf('iter num %d',k);
    disp(msg)
    % gradient and Hessian at the current point
    grad=mongradientRosen(theta);
    H=monhessienRosen(theta);
    % record the current criterion value
    crit=moncritereRosen(theta);
    JIter=[JIter; crit];
    % ensure H is positive definite: chol's second output is nonzero when
    % the factorization fails
    [L,test]=chol(H,'lower');
    if test
        % regularize: shift H by half its Frobenius norm times the identity
        tau=norm(H,'fro');
        H=H+tau/2*eye(2);
    end
    % descent direction: (regularized) Newton step -H^{-1}*grad
    direction=-H\grad;
    % update theta = theta + pas * direction
    theta = theta + pas*direction;
    k=k+1;
    % plot the current theta and the criterion history
    figure(1), hold on
    h = plot(theta(1,:), theta(2,:), 'ro');
    set(h, 'MarkerSize', 3, 'markerfacecolor', 'r');
    figure(2),hold on
    plot(JIter)
end
end
IML/projet_IML/projet.m
View file @
98d649be
...
...
@@ -9,10 +9,9 @@
% Auto-generated by MATLAB on 2019/01/07 14:29:01
% Initialize variables.
filename
=
'/Users/louis/Documents/INSA/ASI4_LouisULMER/IML/projet_IML/bank-additional-full.csv'
;
delimiter
=
';'
;
filename
=
'bank-additional-full.csv'
;
startRow
=
2
;
delimiter
=
';'
;
% Format for each line of text:
% column1: double (%f)
% column2: categorical (%C)
...
...
@@ -86,10 +85,10 @@ clearvars filename delimiter startRow formatSpec fileID dataArray ans;
ind_yes
=
find
(
y
==
'yes'
);
Y
=
zeros
(
length
(
y
),
1
);
Y
(
ind_yes
)
=
1
;
%%
%%
Procedure suppression NAN value
X
=
table
(
age
,
campaign
,
consconfidx
,
conspriceidx
,
default
,
duration
,
education
,
empvarrate
,
euribor3m
,
housing
,
job
,
loan
,
marital
,
nremployed
,
pdays
,
poutcome
,
previous
,
Y
);
%% Suppression des "unknown" de job et education
idx
=
any
(
strcmp
(
cellstr
([
job
,
education
,
housing
,
loan
,
marital
]),
'unknown'
),
2
);
idx
=
any
(
strcmp
(
cellstr
([
job
,
education
,
housing
,
loan
,
marital
]),
'unknown'
),
2
);
X
(
idx
,:)
=
[];
Y
(
idx
)
=
[];
%% Codage Disjonctif des autres variables
...
...
@@ -103,8 +102,6 @@ euribor3m=X.euribor3m;
nremployed
=
X
.
nremployed
;
previous
=
X
.
previous
;
pdays
=
X
.
pdays
;
%%
housing
=
disjonctif2
(
X
.
housing
);
poutcome
=
disjonctif2
(
X
.
poutcome
);
job
=
disjonctif2
(
X
.
job
);
...
...
@@ -115,13 +112,14 @@ default=disjonctif2(X.default);
%%
% On passe en matriciel
X
=
[
age
consconfidx
conspriceidx
default
duration
education
empvarrate
euribor3m
housing
job
loan
marital
nremployed
poutcome
];
%% Matrice de correlation
%% Matrice de correlation
correlation_matrix
(
X
);
% on observe des correlations entre
% on observe des correlations entre
% cons.price.idx || contact
%
%
A VOIR DIMENSSION CHAQUE LABEL
%% PCA
[
valpropres
,
U
,
moy
]
=
mypca
(
X
);
bar
(
valpropres
/
sum
(
valpropres
));
...
...
@@ -132,15 +130,19 @@ P=U(:,1:d);
C
=
projpca
(
X
,
moy
,
P
);
figure
plot
(
C
(
Y
==
1
,
1
),
C
(
Y
==
1
,
2
),
'ro'
)
hold
on
hold
on
plot
(
C
(
Y
==
0
,
1
),
C
(
Y
==
0
,
2
),
'bo'
)
%% Modalits de chaque classe
label0
=
sum
(
Y
==
0
)
%compter les occurences du jeu de test (idem ligne du dessus)
% L'acp ne nous aide pas
%% Modalits de chaque classe
% Compter les occurences
label0
=
sum
(
Y
==
0
)
label1
=
sum
(
Y
==
1
)
%% Downsampling
% On constate un desechilibre entre les deux classes, il faudait penser a
% faire une SVM avec penalisation differente en fonction des points
%% Downsampling
Z
=
[
X
Y
];
[
n
p
]
=
size
(
X
);
Z
=
mydownsampling
(
Z
,
6
);
Z
=
mydownsampling
(
Z
,
8
);
X_small
=
Z
(:,
1
:
end
-
1
);
Y_small
=
Z
(:,
end
);
%% Depouage des donnees
...
...
@@ -149,55 +151,86 @@ ratio =2/3;
ratio
=
1
/
2
;
[
xapp
,
yapp
,
xval
,
yval
]
=
splitdata
(
xapp
,
yapp
,
ratio
);
%% Centrer reduire
[
xtest
]
=
normalizemeanstd
(
xtest
);
[
xval
]
=
normalizemeanstd
(
xval
);
[
xapp
]
=
normalizemeanstd
(
xapp
);
meanx
=
mean
(
xapp
);
stdx
=
std
(
xapp
);
[
xapp
,
xval
,
~
,
~
]
=
normalizemeanstd
(
xapp
,
xval
,
meanx
,
stdx
);
[
~
,
xtest
,
~
,
~
]
=
normalizemeanstd
(
xapp
,
xtest
,
meanx
,
stdx
);
%% 2.2 Methode des K-ppV
k
=
3
;
[
ypred
,
MatDist
]
=
kppv
(
xval
,
xapp
,
yapp
,
k
,
[]);
err
=
mean
(
yval
~=
ypred
)
% Erreur obtenue avec k=3 : 10.07%
%% choix d'une valeur de k
vectK
=
floor
(
linspace
(
1
,
20
,
10
));
%echelle lineaire des valeurs de k possibles
% evaluation d'un modle
err
=
zeros
(
length
(
vectK
),
1
);
%prealocation du vecteur pour gagner en performance
for
i
=
1
:
1
:
length
(
vectK
)
[
ypred
,
MatDist
]
=
kppv
(
xval
,
xapp
,
yapp
,
vectK
(
i
),
[]);
err
(
i
)
=
mean
(
yval
~=
ypred
);
%calcul de l'erreur de validation
end
vectK
=
floor
(
linspace
(
1
,
20
,
10
));
%echelle lineaire des valeurs de k possibles
% evaluation d'un mod
e
le
err
=
zeros
(
length
(
vectK
),
1
);
%prealocation du vecteur pour gagner en performance
for
i
=
1
:
1
:
length
(
vectK
)
[
ypred
,
MatDist
]
=
kppv
(
xval
,
xapp
,
yapp
,
vectK
(
i
),
[]);
err
(
i
)
=
mean
(
yval
~=
ypred
);
%calcul de l'erreur de validation
end
[
val
,
pos
]
=
min
(
err
);
%On trouve l'indice du vecteur pour lequel l'erreur et minimale
meilleur_K
=
(
vectK
(
pos
))
% On selectione le meilleur k possible grace a l'indice
%% essai de cette valeur sur les donnes de test
%% essai de cette valeur sur les donnes de test
k
=
meilleur_K
;
[
ypred
,
MatDist
]
=
kppv
(
xtest
,
xapp
,
yapp
,
k
,
[]);
err
=
mean
(
ytest
~=
ypred
)
%% SVM
% On trouve une erreur de 9.55% avec le meilleur k (13) qui est une bonne
% performance dans la realite.
%% SVM
yapp
(
yapp
==
0
)
=-
1
;
ytest
(
ytest
==
0
)
=-
1
;
yval
(
yval
==
0
)
=-
1
;
%%
% evaluation d'un modle
vectC
=
logspace
(
-
2
,
2
,
9
)
%echelle logarithmique
precision
=
zeros
(
length
(
vectC
),
1
);
for
i
=
1
:
1
:
length
(
vectC
)
[
wapp
,
b
]
=
monsvmclass
(
xapp
,
yapp
,
vectC
(
i
));
% evaluation d'un modele
vectC
=
logspace
(
-
2
,
2
,
9
)
%echelle logarithmique
precision
=
zeros
(
length
(
vectC
),
1
);
for
i
=
1
:
1
:
length
(
vectC
)
[
wapp
,
b
]
=
monsvmclass
(
xapp
,
yapp
,
vectC
(
i
));
ypred
=
monsvmval
(
xval
,
wapp
,
b
);
%f(x) et y on le meme signe ?
ind_bon
=
find
(
ypred
.*
yval
>
0
);
n_bon
=
length
(
ind_bon
);
%nombre de valeurs bien classifies
precision
(
i
)
=
n_bon
/
length
(
yval
)
end
[
val
,
pos
]
=
max
(
precision
)
meilleur_C
=
(
vectC
(
pos
))