In [21]:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import time
from scipy.sparse import csc_matrix, hstack
In [22]:
def load_data(name, big_data=False):
    S = scipy.io.loadmat(name)
    if big_data:
        a = S["a"].tocsr()  # ".tocsr()" is needed for the "Ybig_train_set.mat" dataset
    else:
        a = S["a"]
    y = S["y"]
    id_classe1 = np.where(y == 1)[0]
    id_classe2 = np.where(y == 0)[0]
    return a[id_classe1, :], a[id_classe2, :]
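A quick way to sanity-check load_data is to round-trip a tiny synthetic .mat file; the file name "mini_set.mat" and the random data below are made up for illustration:
In [ ]:
from scipy.io import savemat
# Build and save a small synthetic dataset with the expected keys "a" and "y"
a = np.random.rand(6, 3)
y = np.array([[1], [0], [1], [0], [1], [0]])
savemat("mini_set.mat", {"a": a, "y": y})
c1, c2 = load_data("mini_set.mat")
print(c1.shape, c2.shape)  # expected: (3, 3) (3, 3)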
In [23]:
def optimisation(classe1, classe2, methode="gradient", pas="wolfe", maxiter=10000, maxtime=10, big_data=False):
    def fct(x):
        # Clipping value for the exponent (avoids overflow in np.exp)
        D = 50
        # Samples with yi == 1: loss log(1 + exp(-ai.x))
        exposant = np.clip(a1 @ x, -D, D)
        exp = np.exp(-exposant)
        f1 = np.sum(np.log(1 + exp))
        g1 = -a1.T @ (exp / (1 + exp))
        # Samples with yi == 0: loss log(1 + exp(ai.x)) = ai.x + log(1 + exp(-ai.x))
        exposant = np.clip(a2 @ x, -D, D)
        exp = np.exp(-exposant)
        f2 = np.sum(exposant) + np.sum(np.log(1 + exp))
        g2 = a2.T @ (1 / (1 + exp))
        f = f1 + f2
        g = g1 + g2
        return f, g
    def w1(x, d, alpha, beta1):
        # First Wolfe condition (Armijo, sufficient decrease)
        f0, g0 = fct(x)
        f1, g1 = fct(x + alpha * d)
        return f1 <= f0 + alpha * beta1 * np.dot(d, g0)

    def w2(x, d, alpha, beta2):
        # Second Wolfe condition (curvature)
        f0, g0 = fct(x)
        f1, g1 = fct(x + alpha * d)
        return np.dot(d, g1) >= beta2 * np.dot(d, g0)
    def wolfebissection(x, d, alpha=1, beta1=0.0001, beta2=0.9):
        aleft = 0
        aright = np.inf
        it = 0
        while it < 100:  # safety cap so the bisection cannot loop forever
            if w1(x, d, alpha, beta1) and w2(x, d, alpha, beta2):
                break
            if not w1(x, d, alpha, beta1):
                aright = alpha
                alpha = (aleft + aright) / 2
            elif not w2(x, d, alpha, beta2):
                aleft = alpha
                if aright < np.inf:
                    alpha = (aleft + aright) / 2
                else:
                    alpha = 2 * alpha  # the factor 2 is arbitrary; anything > 1 works
            it += 1
        return alpha
    def backtrackingsimple(x, d, alpha=1):
        f0, g0 = fct(x)              # value and gradient at the current iterate
        f1, g1 = fct(x + alpha * d)  # value and gradient at the candidate iterate
        while f1 > f0 and alpha >= 1e-16:
            alpha /= 2
            f1, g1 = fct(x + alpha * d)
        return alpha
    def methgradient(x0):
        start_time = time.time()
        temps = np.zeros(maxiter)
        fobj = np.zeros(maxiter)
        x = x0.copy()
        for k in range(maxiter):
            f, g = fct(x)
            d = -g
            if pas == "wolfe":
                alpha = wolfebissection(x, d)
            elif pas == "back":
                alpha = backtrackingsimple(x, d)
            x = x + alpha * d
            f, g = fct(x)
            fobj[k] = f
            temps[k] = time.time() - start_time
            if temps[k] >= maxtime:
                break
        return x, fobj[:k+1], temps[:k+1]

    def methgradientacc(x0):
        # Not covered in class
        pass
    # Data for the two groups a1 and a2 (a leading column of ones adds the intercept)
    n1, m = classe1.shape
    n2, m = classe2.shape
    if big_data:
        # Sparse assembly with csc_matrix/hstack to keep big datasets memory-friendly
        a1 = hstack([csc_matrix(np.ones((n1, 1))), classe1]).tocsr()
        a2 = hstack([csc_matrix(np.ones((n2, 1))), classe2]).tocsr()
    else:
        a1 = np.ones((n1, 1+m))
        a2 = np.ones((n2, 1+m))
        a1[:, 1:] = classe1
        a2[:, 1:] = classe2
    # Initial iterate
    x0 = np.ones(1+m)
    if methode == "gradient": x, f, t = methgradient(x0)
    # Not covered in class
    # if methode == "gradientacc": x, f, t = methgradientacc(x0)
    return x, f, t
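Since fct is local to optimisation, the objective/gradient pair can be checked in a standalone cell with finite differences on small synthetic data; fct_test, the sizes, and the names A1/A2 below are made up for illustration:
In [ ]:
# Finite-difference check: the analytic gradient should match the forward-difference estimate
rng = np.random.default_rng(0)
A1 = rng.standard_normal((5, 4))
A2 = rng.standard_normal((5, 4))
def fct_test(x):
    # Same objective/gradient formulas as fct, with A1 (yi == 1) and A2 (yi == 0)
    D = 50
    z1 = np.clip(A1 @ x, -D, D); e1 = np.exp(-z1)
    z2 = np.clip(A2 @ x, -D, D); e2 = np.exp(-z2)
    f = np.sum(np.log(1 + e1)) + np.sum(z2 + np.log(1 + e2))
    g = -A1.T @ (e1 / (1 + e1)) + A2.T @ (1 / (1 + e2))
    return f, g
x = rng.standard_normal(4)
f, g = fct_test(x)
eps = 1e-6
g_fd = np.array([(fct_test(x + eps * e)[0] - f) / eps for e in np.eye(4)])
print(np.max(np.abs(g - g_fd)))  # should be tiny, around 1e-6 to 1e-5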
In [ ]:
############# Model training #############
classe1_train, classe2_train = load_data("train_set.mat")
x_gd_wf, f_gd_wf, t_gd_wf = optimisation(classe1_train, classe2_train, maxtime=60)
x_gd_bt, f_gd_bt, t_gd_bt = optimisation(classe1_train, classe2_train, pas="back", maxtime=60)
#x_ga_wf, f_ga_wf, t_ga_wf = optimisation(classe1_train, classe2_train, methode="gradientacc", pas="wolfe", maxtime=60)
#x_ga_bt, f_ga_bt, t_ga_bt = optimisation(classe1_train, classe2_train, methode="gradientacc", pas="back", maxtime=60)
plt.loglog(t_gd_wf, f_gd_wf, label="Grad. Wolfe")
plt.loglog(t_gd_bt, f_gd_bt, label="Grad. Backtracking")
#plt.loglog(t_ga_wf, f_ga_wf, label="GradAcc. Wolfe")
#plt.loglog(t_ga_bt, f_ga_bt, label="GradAcc. Backtracking")
plt.xlabel("Time (s)")
plt.ylabel("Objective value")
plt.legend(loc=1)
plt.show()
In [ ]:
classe1_train, classe2_train = load_data("big_train_set.mat", big_data=True)
x_gd_bt, f_gd_bt, t_gd_bt = optimisation(classe1_train,classe2_train,pas="backtracking",maxtime=180,big_data=True)
#x_ga_bt, f_ga_bt, t_ga_bt = optimisation(classe1_train,classe2_train,methode="gradientacc", pas="backtracking",maxtime=180)
plt.loglog(t_gd_bt,f_gd_bt,label="Grad. Backting")
#plt.loglog(t_ga_bt,f_ga_bt,label="GradAcc. Backting")
plt.xlabel("Temps")
plt.ylabel("Erreur")
plt.legend(loc=1)
In [ ]:
def entrainement(classe1, classe2):
    # A minimal sketch: reuse optimisation (gradient + backtracking, sparse data) and return the weights
    x, f, t = optimisation(classe1, classe2, pas="back", maxtime=180, big_data=True)
    return x
In [ ]:
############# Model training #############
classe1_train, classe2_train = load_data("Ybig_train_set.mat", big_data=True)
poids_reseau = entrainement(classe1_train, classe2_train)
In [ ]:
def validation(poids_reseau, classe1_test, classe2_test):
    def pred_sigmoide(poids_reseau, data):
        # Sigmoid prediction rounded to the nearest class (0 or 1); works for dense and sparse rows
        z = poids_reseau[0] + np.asarray(data.dot(poids_reseau[1:])).ravel()[0]
        return np.round(1 / (1 + np.exp(-z)))
    correct = 0
    nb1, nb2 = classe1_test.shape[0], classe2_test.shape[0]
    for i in range(nb1): correct += pred_sigmoide(poids_reseau, classe1_test[i,:])
    for i in range(nb2): correct += 1 - pred_sigmoide(poids_reseau, classe2_test[i,:])
    print(f"{correct}/{nb1+nb2} test points classified correctly")
    return correct
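For larger test sets, the row-by-row loop above can be replaced by a vectorized variant with the same decision rule; validation_vec is a sketch, not part of the original notebook:
In [ ]:
def validation_vec(poids_reseau, classe1_test, classe2_test):
    def proba(data):
        # Works for dense arrays and scipy.sparse matrices alike
        z = poids_reseau[0] + np.asarray(data @ poids_reseau[1:]).ravel()
        return 1 / (1 + np.exp(-np.clip(z, -50, 50)))
    correct = np.sum(proba(classe1_test) >= 0.5) + np.sum(proba(classe2_test) < 0.5)
    total = classe1_test.shape[0] + classe2_test.shape[0]
    print(f"{correct}/{total} test points classified correctly")
    return correct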
In [ ]:
############# Model validation #############
classe1_test, classe2_test = load_data("Ybig_test_set.mat", big_data=True)
result = validation(poids_reseau, classe1_test, classe2_test)