-rwxr-xr-x  train.py  70
1 file changed, 38 insertions(+), 32 deletions(-)
diff --git a/train.py b/train.py
index 93139b7..3b2b364 100755
--- a/train.py
+++ b/train.py
@@ -26,7 +26,6 @@ from numpy import linalg as LA
def normalise_faces(average_face, faces):
faces = np.subtract(faces, np.tile(average_face, (faces.shape[0],1)))
return np.divide(faces.T, np.std(faces.T, axis=0)).T
-
# Split data into training and testing sets
def test_split(n_faces, raw_faces, split, seed):
random.seed(seed)
@@ -50,7 +49,6 @@ def test_split(n_faces, raw_faces, split, seed):
faces_test = faces_test.reshape(n_faces*n_test_faces, n_pixels)
return faces_train, faces_test, target_train, target_test
-
# usage: train.py [-h] -i DATA -o MODEL [-m M]
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--data", help="Input CSV file", required=True)
@@ -65,10 +63,11 @@ parser.add_argument("-t", "--split", help="Fraction of data to use for testing",
parser.add_argument("-2", "--grapheigen", help="Swow 2D graph of targets versus principal components", action='store_true')
parser.add_argument("-p", "--pca", help="Use PCA", action='store_true')
parser.add_argument("-l", "--lda", help="Use LDA", action='store_true')
+parser.add_argument("-r", "--reconstruct", help="Use PCA reconstruction, specify face NR", type=int, default=0)
+parser.add_argument("-q", "--pca_r", help="Use Reduced PCA", action='store_true')
+
args = parser.parse_args()
-if args.pca and args.lda:
- sys.exit("Flags -p and -l are mutually exclusive")
M = args.eigen
@@ -80,50 +79,57 @@ n_faces = 10
faces_train, faces_test, target_train, target_test = test_split(n_faces, raw_faces, args.split, args.seed)
# This removes the mean and scales to unit variance
-#sc = StandardScaler()
+sc = StandardScaler()
#faces_train = sc.fit_transform(faces_train)
#faces_test = sc.transform(faces_test)
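+# NB: only the StandardScaler constructor is re-enabled here; the
+# fit_transform/transform calls above stay disabled, and sc is instead
+# reused below to rescale the eigenvectors recovered in the reduced-PCA branch.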
explained_variances = ()
-if args.lda:
- average_face = np.mean(faces_train, axis=0)
- n_cases = 52
-# lda = LinearDiscriminantAnalysis(n_components=M)
-# faces_train = lda.fit_transform(faces_train, target_train)
-# faces_test = lda.transform(faces_test)
-# explained_variances = lda.explained_variance_ratio_
-### FIND MEAN OF EACH CLASS
- n_training_faces = int(round(n_cases*(1 - args.split)))
- n_test_faces = n_cases - n_training_faces
- mean_vector = np.zeros(10)
- for n in range (10):
- mean_acc = 0
- for x in range (int(np.divide(n_training_faces,10))):
- mean_acc = np.add(mean_acc, np.mean(faces_train[x + n*10], axis=0))
- mean_vector [n] = np.divide(mean_acc, np.divide(n_training_faces,10))
- print (mean_vector)
-### SCATTER MATRIX
- for n in range (10)
- faces_train = normalise_faces(mean_vector[n], faces_train[
-else:
+if args.pca or args.pca_r:
# faces_pca contains the principal components, i.e. the M most significant eigenvectors
average_face = np.mean(faces_train, axis=0)
faces_train = normalise_faces(average_face, faces_train)
faces_test = normalise_faces(average_face, faces_test)
- e_vals, e_vecs = LA.eigh(np.cov(faces_train.T))
+ if (args.pca_r):
+ e_vals, e_vecs = LA.eigh(np.cov(faces_train))
+ e_vecs_original = e_vecs
+ e_vecs = np.dot(faces_train.T, e_vecs)
+ e_vecs = sc.fit_transform(e_vecs)
+ ###TODO Maybe replace with our normalising function
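+ # Why the trick above works: for N images of D pixels with N << D, if u is
+ # an eigenvector of the N x N Gram matrix X X^T (which np.cov(faces_train)
+ # approximates up to centering and scaling) with eigenvalue l, then
+ # (X^T X)(X^T u) = l (X^T u), i.e. X^T u is an eigenvector of the D x D
+ # covariance with the same eigenvalue. This avoids decomposing the full
+ # D x D matrix; the recovered columns are not unit-norm, hence the
+ # rescaling above (see TODO).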
+
+ if (args.reconstruct):
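+ # Reconstruction sketch: start from the standardised mean face and add a
+ # weighted sum of M recovered eigenvectors. NB, two things worth checking:
+ # (1) LA.eigh returns eigenvalues in ascending order and the flip happens
+ # after this block, so these M components are currently the *least*
+ # significant ones; (2) e_vecs_original[i][args.reconstruct] is entry
+ # (i, r) of the eigenvector matrix, while face r's coordinate on component
+ # i is entry (r, i), so the indexing may be transposed. (Also: with
+ # default=0, a request to reconstruct face 0 never triggers this branch.)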
+ rec_vec = np.divide(average_face, np.std(average_face)).T
+ e_vecs_t = e_vecs.T
+ for i in range (M):
+ rec_vec = np.add(rec_vec, np.dot(e_vecs_original[i][args.reconstruct], e_vecs_t[i]))
+ plt.imshow(rec_vec.reshape([46,56]).T, cmap = 'gist_gray')
+ plt.show()
+ else:
+ e_vals, e_vecs = LA.eigh(np.cov(faces_train.T))
+
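+# numpy's eigh returns eigenvalues in ascending order, so flip both the
+# eigenvalues and the (column) eigenvectors to put the most significant first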
e_vals = np.flip(e_vals)
e_vecs = np.fliplr(e_vecs).T
faces_train = np.dot(faces_train, e_vecs[:M].T)
faces_test = np.dot(faces_test, e_vecs[:M].T)
+# FOR THE ASSESSMENT: PRINT EIGENVALUES AND EIGENVECTORS OF BOTH CASES AND COMPARE RESULTS WITH PHYSICAL EXPLANATIONS
+
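+# Untested sketch for that comparison (all names hypothetical), to be run on
+# the normalised training data X before the projection above: the non-zero
+# eigenvalues of X X^T (N x N) and X^T X (D x D) coincide, i.e.
+#   g_small = np.sort(LA.eigh(np.dot(X, X.T))[0])[::-1]
+#   g_full = np.sort(LA.eigh(np.dot(X.T, X))[0])[::-1]
+#   assert np.allclose(g_small[:M], g_full[:M])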
-# Plot the variances (eigenvalues) from the pca object
+if args.lda:
+ lda = LinearDiscriminantAnalysis(n_components=M)
+ faces_train = lda.fit_transform(faces_train, target_train)
+ faces_test = lda.transform(faces_test)
+ class_means = lda.means_
+ e_vals = lda.explained_variance_ratio_
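+ # NB: scikit-learn limits LDA to at most n_classes - 1 discriminant
+ # components (a larger M is clipped or rejected depending on the version),
+ # so the effective output dimensionality here is min(M, n_classes - 1).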
+
if args.faces:
if args.lda:
- sys.exit("Can not plot eigenfaces when using LDA")
- for i in range(args.faces):
- ax = plt.subplot(2, args.faces/2, i + 1)
- ax.imshow(e_vecs[i].reshape([46, 56]).T, cmap = 'gist_gray')
+ for i in range (10):
+ ax = plt.subplot(2, 5, i + 1)
+ ax.imshow(class_means[i].reshape([46,56]).T, cmap = 'gist_gray')
+ else:
+ for i in range(args.faces):
+ ax = plt.subplot(2, args.faces//2, i + 1)
+ ax.imshow(e_vecs[i].reshape([46, 56]).T, cmap = 'gist_gray')
plt.show()
if args.principal: