-rwxr-xr-x  train.py  29
1 files changed, 21 insertions, 8 deletions
diff --git a/train.py b/train.py
index 25993f7..b4aaa89 100755
--- a/train.py
+++ b/train.py
@@ -59,23 +59,28 @@ def test_split(n_faces, raw_faces, split, seed):
def draw_conf_mat(args, target_test, target_pred):
cm = confusion_matrix(target_test, target_pred)
- print(cm)
+ acc_sc = accuracy_score(target_test, target_pred)
+ print('Accuracy: ', acc_sc)
if (args.conf_mat):
plt.matshow(cm, cmap='Blues')
plt.colorbar()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
- return accuracy_score(target_test, target_pred)
+ return acc_sc
def test_model(M, faces_train, faces_test, target_train, target_test, args):
raw_faces_train = faces_train
+ raw_faces_test = faces_test
explained_variances = ()
if args.pca or args.pca_r:
# faces_pca contains the principal components, i.e. the M eigenvectors with the highest variance
average_face = np.mean(faces_train, axis=0)
+ #PLOTTING MEAN FACE
+ #plt.imshow(average_face.reshape([46,56]).T, cmap = 'gist_gray')
+ plt.show()
deviations_tr = np.std(faces_train, axis=0)
deviations_tst = np.std(faces_train, axis=0)
faces_train = normalise_faces(average_face, faces_train)
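The reworked draw_conf_mat above computes the accuracy once, prints it alongside the confusion matrix, and returns the same value. A minimal, self-contained sketch of that pattern with scikit-learn (the toy labels are illustrative only; the real script derives target_test and target_pred from the face dataset):

# Sketch of the confusion-matrix / accuracy pattern used in draw_conf_mat.
# The toy labels below are placeholders, not data from train.py.
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix

target_test = [0, 0, 1, 1, 2, 2]   # ground-truth class labels
target_pred = [0, 1, 1, 1, 2, 0]   # classifier predictions

cm = confusion_matrix(target_test, target_pred)
acc_sc = accuracy_score(target_test, target_pred)
print('Accuracy: ', acc_sc)

plt.matshow(cm, cmap='Blues')      # heatmap of the confusion matrix
plt.colorbar()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()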
@@ -89,6 +94,10 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
print('Standard PCA')
e_vals, e_vecs = LA.eigh(np.cov(faces_train.T))
# e_vecs = normalise_faces(np.mean(e_vecs,axis=0), e_vecs)
+ #PLOTTING NON-ZERO EVALS
+ #if args.pca:
+ # plt.semilogy(range(2576), np.absolute(416*np.flip(e_vals)))
+ # plt.show()
e_vals = np.flip(e_vals)[:M]
e_vecs = np.fliplr(e_vecs).T[:M]
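For reference, the 'Standard PCA' branch diagonalises the covariance of the centred training faces and keeps the M eigenvectors with the largest eigenvalues. A rough standalone sketch of that step, assuming flattened 46x56 images (2576 pixels) as the plotting comments in train.py suggest; the random data and the value of M are stand-ins:

# Rough sketch of the "Standard PCA" eigendecomposition step.
# faces_train here is random stand-in data shaped like N flattened 46x56 images.
import numpy as np
from numpy import linalg as LA

rng = np.random.default_rng(0)
faces_train = rng.random((100, 2576))   # stand-in for the real training faces
M = 50                                  # illustrative number of eigenvectors

average_face = np.mean(faces_train, axis=0)
centred = faces_train - average_face    # mean-centre, as normalise_faces presumably does

# eigh returns eigenvalues in ascending order, hence the flips below
e_vals, e_vecs = LA.eigh(np.cov(centred.T))
e_vals = np.flip(e_vals)[:M]            # M largest eigenvalues
e_vecs = np.fliplr(e_vecs).T[:M]        # matching eigenvectors, one per row

weights = centred @ e_vecs.T            # project faces onto the M components, shape (N, M)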
@@ -200,17 +209,21 @@ def main():
raw_faces = genfromtxt(args.data, delimiter=',')
targets = np.repeat(np.arange(n_faces),n_cases)
-
faces_train, faces_test, target_train, target_test = test_split(n_faces, raw_faces, args.split, args.seed)
-
-
+
if args.reigen:
- for M in range(args.eigen, args,reigen):
+ accuracy = np.zeros(args.reigen - args.eigen)
+ for M in range(args.eigen, args.reigen):
start = timer()
- accuracy[M] = test_model(M, faces_train, faces_test, target_train, target_test, args)
+ accuracy[M - args.eigen] = test_model(M, faces_train, faces_test, target_train, target_test, args)
end = timer()
print("Run with", M, "eigenvalues completed in ", end-start, "seconds")
-
+ #plot
+ plt.plot(range(args.eigen, args.reigen), 100*accuracy)
+ plt.xlabel('Number of Eigenvectors used (M)')
+ plt.ylabel('Recognition Accuracy (%)')
+ plt.grid(True)
+ plt.show()
else:
M = args.eigen
start = timer()
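The new reigen branch pre-allocates the accuracy array, fixes the range typo (args,reigen -> args.reigen), indexes by M - args.eigen so results start at index 0, and plots the sweep once the loop finishes. A hedged sketch of that loop-and-plot structure, with test_model replaced by a placeholder because the real call needs the face data and the parsed CLI args:

# Sketch of the accuracy-vs-M sweep added to main(); eval_model is a stand-in
# for test_model(M, faces_train, faces_test, target_train, target_test, args).
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer

eigen, reigen = 10, 60                  # illustrative stand-ins for args.eigen / args.reigen

def eval_model(M):
    # placeholder curve: accuracy rises with M and then saturates
    return 1.0 - 1.0 / (1.0 + 0.2 * M)

accuracy = np.zeros(reigen - eigen)
for M in range(eigen, reigen):
    start = timer()
    accuracy[M - eigen] = eval_model(M) # shift so the first M lands at index 0
    end = timer()
    print("Run with", M, "eigenvalues completed in ", end - start, "seconds")

plt.plot(range(eigen, reigen), 100 * accuracy)
plt.xlabel('Number of Eigenvectors used (M)')
plt.ylabel('Recognition Accuracy (%)')
plt.grid(True)
plt.show()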