Diffstat (limited to 'train.py')
-rwxr-xr-x  train.py  39
1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/train.py b/train.py
index a2d9d4b..46f76a4 100755
--- a/train.py
+++ b/train.py
@@ -6,7 +6,6 @@
import matplotlib.pyplot as plt
import sys
import random
-from numpy import linalg as LA
from random import randint
from sklearn.neighbors import KNeighborsClassifier
@@ -21,11 +20,11 @@ import argparse
import numpy as np
from numpy import genfromtxt
-# from numpy import linalg as LA
+from numpy import linalg as LA
# subtract the average face from each row of the face matrix
-def normalise_faces(average_face, raw_faces):
- return np.subtract(raw_faces, np.tile(average_face, (raw_faces.shape[1],1)).T)
+def normalise_faces(average_face, faces):
+ return np.subtract(faces, np.tile(average_face, (faces.shape[0],1)))
# usage: train.py [-h] -i DATA -o MODEL [-m M]
parser = argparse.ArgumentParser()
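
The rewritten normalise_faces assumes faces are stored one per row, so the mean face is tiled down axis 0 instead of being transposed. A minimal sketch of what the new version computes (hypothetical shapes; note that NumPy broadcasting makes the explicit tile unnecessary):

    import numpy as np

    faces = np.random.rand(520, 2576)        # hypothetical: one face per row
    average_face = np.mean(faces, axis=0)    # (2576,) mean across all faces

    # the patched version: tile the mean face down the rows, then subtract
    centred = np.subtract(faces, np.tile(average_face, (faces.shape[0], 1)))

    # broadcasting gives the same result without the explicit tile
    assert np.allclose(centred, faces - average_face)
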
@@ -60,8 +59,6 @@ def test_split(n_faces, raw_faces, split, seed):
random.seed(seed)
n_cases = 52
n_pixels = 2576
-
- print(raw_faces.shape)
raw_faces_split = np.split(raw_faces,n_cases)
n_training_faces = int(round(n_cases*(1 - split)))
@@ -83,9 +80,9 @@ def test_split(n_faces, raw_faces, split, seed):
faces_train, faces_test, target_train, target_test = test_split(n_faces, raw_faces, args.split, args.seed)
# This removes the mean and scales to unit variance
-sc = StandardScaler()
-faces_train = sc.fit_transform(faces_train)
-faces_test = sc.transform(faces_test)
+#sc = StandardScaler()
+#faces_train = sc.fit_transform(faces_train)
+#faces_test = sc.transform(faces_test)
explained_variances = ()
if args.lda:
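
With the StandardScaler commented out, the decomposition below runs on the plain covariance of mean-centred pixels rather than on per-pixel unit-variance features; normalise_faces now stands in for the scaler's centring step. A sketch of the difference, on hypothetical data:

    import numpy as np
    from sklearn.preprocessing import StandardScaler

    X = np.random.rand(100, 2576)            # hypothetical training faces

    # what the removed lines did: centre AND scale each pixel to unit variance
    scaled = StandardScaler().fit_transform(X)

    # what the code now relies on instead: centring only
    centred = X - np.mean(X, axis=0)

    assert np.allclose(scaled.mean(axis=0), 0)   # both are zero-mean
    assert np.allclose(scaled.std(axis=0), 1)    # only the scaled one has unit variance
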
@@ -95,17 +92,31 @@ if args.lda:
explained_variances = lda.explained_variance_ratio_
else:
# faces_pca contains the principal components, i.e. the M most variant eigenvectors
- pca = PCA(svd_solver='full', n_components=M)
- faces_train = pca.fit_transform(faces_train)
- faces_test = pca.transform(faces_test)
- explained_variances = pca.explained_variance_ratio_
+### FROM SKLEARN
+# pca = PCA(svd_solver='full', n_components=M)
+# faces_train = pca.fit_transform(faces_train)
+# faces_test = pca.transform(faces_test)
+# explained_variances = pca.explained_variance_ratio_
+
+### FROM OLD CODE
+ average_face = np.mean(faces_train, axis=0)
+ plt.imshow(average_face.reshape(46, 56))
+ plt.show()
+ faces_train = normalise_faces(average_face, faces_train)
+ faces_test = normalise_faces(average_face, faces_test)
+ # eigh returns orthonormal eigenvectors (no renormalisation needed)
+ # with eigenvalues in ascending order, so flip both to put the M
+ # most significant components first
+ e_vals, e_vecs = LA.eigh(np.dot(faces_train.T, faces_train))
+ e_vals = e_vals[::-1]
+ e_vecs = np.fliplr(e_vecs)
+ # ratios of total variance, matching pca.explained_variance_ratio_
+ explained_variances = e_vals[:M] / np.sum(e_vals)
+ # the eigenvectors are the columns of e_vecs, so slice along axis 1
+ faces_train = np.dot(faces_train, e_vecs[:, :M])
+ faces_test = np.dot(faces_test, e_vecs[:, :M])
# Plot the variances (eigenvalues) from the pca object
if args.faces:
if args.lda:
sys.exit("Can not plot eigenfaces when using LDA")
for i in range(args.faces):
ax = plt.subplot(2, args.faces // 2, i + 1)
- ax.imshow(pca.components_[i].reshape([46, 56]).T)
+ ax.imshow(e_vecs[:, i].reshape([46, 56]), cmap='gist_gray')
plt.show()
if args.principal:
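
For reference, a self-contained sketch of the eigendecomposition route this commit switches to, checked against sklearn's PCA on hypothetical data (LA.eigh returns eigenvalues in ascending order, hence the flips; the two projections agree only up to the sign of each component):

    import numpy as np
    from numpy import linalg as LA
    from sklearn.decomposition import PCA

    X = np.random.rand(100, 50)              # hypothetical faces, one per row
    M = 5
    Xc = X - X.mean(axis=0)                  # mean-centre, as normalise_faces does

    # manual PCA: eigendecompose the (unnormalised) covariance matrix X^T X
    e_vals, e_vecs = LA.eigh(np.dot(Xc.T, Xc))
    e_vals, e_vecs = e_vals[::-1], np.fliplr(e_vecs)
    proj_manual = np.dot(Xc, e_vecs[:, :M])

    # sklearn PCA on the same data for comparison
    proj_sklearn = PCA(svd_solver='full', n_components=M).fit_transform(X)

    # columns agree up to sign, since eigenvector signs are arbitrary
    assert np.allclose(np.abs(proj_manual), np.abs(proj_sklearn))
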