author     nunzip <np.scarh@gmail.com>        2018-11-06 16:36:01 +0000
committer  Vasil Zlatanov <v@skozl.com>       2018-11-06 18:30:46 +0000
commit     c2d16123a81ef0d79450fbc89bf8086464778860 (patch)
tree       c76643c0d6fd76f13a37e7862347db60926761a8
parent     da45e1bd00aeb5f5d56a63442a26d8c040937c17 (diff)
download   vz215_np1915-c2d16123a81ef0d79450fbc89bf8086464778860.tar.gz
           vz215_np1915-c2d16123a81ef0d79450fbc89bf8086464778860.tar.bz2
           vz215_np1915-c2d16123a81ef0d79450fbc89bf8086464778860.zip
Plotting + Fix m to M iteration code
-rwxr-xr-x  train.py | 29
1 file changed, 21 insertions(+), 8 deletions(-)
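
The hunks below do three things: draw_conf_mat now computes accuracy_score once, prints it and returns it; test_model gains (mostly commented-out) plotting of the mean face and of the eigenvalue spectrum; and main() fixes the `args,reigen` typo so the M-sweep actually runs, storing one accuracy per tested M. A minimal standalone sketch of that sweep-and-plot pattern follows; test_model is replaced by an illustrative stub and Args is a hypothetical stand-in for the argparse namespace used in train.py:

import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer

def test_model(M, *data, args=None):
    # stub: the real train.py function fits and scores a PCA face classifier
    return 0.5 + 0.4 * (1 - np.exp(-M / 50))

class Args:                     # hypothetical stand-in for the argparse namespace
    eigen, reigen = 10, 100

args = Args()
accuracy = np.zeros(args.reigen - args.eigen)    # one slot per tested M
for M in range(args.eigen, args.reigen):
    start = timer()
    accuracy[M - args.eigen] = test_model(M, args=args)   # index shifted by args.eigen
    end = timer()
    print("Run with", M, "eigenvalues completed in", end - start, "seconds")

plt.plot(range(args.eigen, args.reigen), 100 * accuracy)
plt.xlabel('Number of Eigenvectors used (M)')
plt.ylabel('Recognition Accuracy (%)')
plt.grid(True)
plt.show()
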
diff --git a/train.py b/train.py
index 25993f7..b4aaa89 100755
--- a/train.py
+++ b/train.py
@@ -59,23 +59,28 @@ def test_split(n_faces, raw_faces, split, seed):
def draw_conf_mat(args, target_test, target_pred):
cm = confusion_matrix(target_test, target_pred)
- print(cm)
+ acc_sc = accuracy_score(target_test, target_pred)
+ print('Accuracy: ', acc_sc)
if (args.conf_mat):
plt.matshow(cm, cmap='Blues')
plt.colorbar()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
- return accuracy_score(target_test, target_pred)
+ return acc_sc
def test_model(M, faces_train, faces_test, target_train, target_test, args):
raw_faces_train = faces_train
+ raw_faces_test = faces_test
explained_variances = ()
if args.pca or args.pca_r:
# faces_pca contains the principal components, i.e. the M eigenvectors with the largest variance
average_face = np.mean(faces_train, axis=0)
+ #PLOTTING MEAN FACE
+ #plt.imshow(average_face.reshape([46,56]).T, cmap = 'gist_gray')
+ #plt.show()
deviations_tr = np.std(faces_train, axis=0)
deviations_tst = np.std(faces_train, axis=0)
faces_train = normalise_faces(average_face, faces_train)
@@ -89,6 +94,10 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
print('Standard PCA')
e_vals, e_vecs = LA.eigh(np.cov(faces_train.T))
# e_vecs = normalise_faces(np.mean(e_vecs,axis=0), e_vecs)
+ #PLOTTING NON-ZERO EVALS
+ #if args.pca:
+ # plt.semilogy(range(2576), np.absolute(416*np.flip(e_vals)))
+ # plt.show()
e_vals = np.flip(e_vals)[:M]
e_vecs = np.fliplr(e_vecs).T[:M]
@@ -200,17 +209,21 @@ def main():
raw_faces = genfromtxt(args.data, delimiter=',')
targets = np.repeat(np.arange(n_faces),n_cases)
-
faces_train, faces_test, target_train, target_test = test_split(n_faces, raw_faces, args.split, args.seed)
-
-
+
if args.reigen:
- for M in range(args.eigen, args,reigen):
+ accuracy = np.zeros(args.reigen - args.eigen)
+ for M in range(args.eigen, args.reigen):
start = timer()
- accuracy[M] = test_model(M, faces_train, faces_test, target_train, target_test, args)
+ accuracy[M - args.eigen] = test_model(M, faces_train, faces_test, target_train, target_test, args)
end = timer()
print("Run with", M, "eigenvalues completed in ", end-start, "seconds")
-
+ #plot recognition accuracy (%) against the number of eigenvectors M
+ plt.plot(range(args.eigen, args.reigen), 100*accuracy)
+ plt.xlabel('Number of Eigenvectors used (M)')
+ plt.ylabel('Recognition Accuracy (%)')
+ plt.grid(True)
+ plt.show()
else:
M = args.eigen
start = timer()
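
For reference, the plotting this commit introduces (largely as commented-out lines) corresponds to the sketch below. Random data stands in for the 416 face vectors of 46x56 = 2576 pixels, and the inline mean subtraction replaces train.py's normalise_faces; this only illustrates the calls, not the project's actual pipeline:

import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
faces_train = rng.random((416, 2576))            # 416 faces, 46*56 = 2576 pixels each

# mean ("average") face, displayed as a 46x56 image
average_face = np.mean(faces_train, axis=0)
plt.imshow(average_face.reshape([46, 56]).T, cmap='gist_gray')
plt.show()

# eigenvalue spectrum of the pixel covariance matrix, largest first
faces_centred = faces_train - average_face        # stand-in for normalise_faces
e_vals, e_vecs = LA.eigh(np.cov(faces_centred.T))
plt.semilogy(range(2576), np.absolute(416 * np.flip(e_vals)))
plt.show()
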