author     Vasil Zlatanov <v@skozl.com>    2018-11-07 15:53:06 +0000
committer  Vasil Zlatanov <v@skozl.com>    2018-11-07 15:53:06 +0000
commit     9e7a2c95c84e19ddf951fe262c0ba87ccfe0a6c4 (patch)
tree       346f436a2582274877c89a712bb167eb8f7118c7
parent     cb93af3155c318b9168fdec272d5203a57de3d47 (diff)
Use correct face normalisation
-rwxr-xr-x  train.py  30
1 file changed, 18 insertions, 12 deletions
diff --git a/train.py b/train.py
index 6ac963b..169163f 100755
--- a/train.py
+++ b/train.py
@@ -38,9 +38,9 @@ n_cases = 10
n_pixels = 2576
# subtract the mean face from each row of the face matrix
-def normalise_faces(average_face, faces):
+def normalise_faces(deviations_tr, average_face, faces):
faces = np.subtract(faces, np.tile(average_face, (faces.shape[0],1)))
- return np.divide(faces.T, np.std(faces.T, axis=0)).T
+ return np.divide(faces, deviations_tr)
# Split data into training and testing sets
def test_split(n_faces, raw_faces, split, seed):
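Review note: the revised normalise_faces takes the training-set standard deviations as an argument, so both the training and test faces are centred on the training mean and scaled by the same per-pixel factors, rather than each split being standardised by its own statistics. A minimal sketch of the intended behaviour, with illustrative names that are not taken from train.py:

    import numpy as np

    def standardise(faces, mean_face, train_std):
        # subtract the training mean from every face (row) ...
        centred = faces - mean_face
        # ... then divide each pixel by the training-set standard deviation
        return centred / train_std

    X_train = np.random.rand(8, 2576)   # toy stand-in for the face matrix
    X_test  = np.random.rand(4, 2576)

    mean_face = np.mean(X_train, axis=0)
    train_std = np.std(X_train, axis=0)

    # both splits are normalised with statistics from the training set only
    X_train_n = standardise(X_train, mean_face, train_std)
    X_test_n  = standardise(X_test,  mean_face, train_std)

The np.tile/np.subtract/np.divide calls in the patch are equivalent to the broadcasting used above.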
@@ -82,16 +82,22 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
explained_variances = ()
+
+ distances = np.zeros(faces_test.shape[0])
+
if args.pca or args.pca_r:
# faces_pca contains the principal components, i.e. the M most significant eigenvectors
average_face = np.mean(faces_train, axis=0)
#PLOTTING MEAN FACE
#plt.imshow(average_face.reshape([46,56]).T, cmap = 'gist_gray')
plt.show()
- deviations_tr = np.std(faces_train, axis=0)
- deviations_tst = np.std(faces_train, axis=0)
- faces_train = normalise_faces(average_face, faces_train)
- faces_test = normalise_faces(average_face, faces_test)
+ if args.classifyalt:
+ deviations_tr = np.ones(n_pixels)
+ else:
+ deviations_tr = np.std(faces_train, axis=0)
+ # deviations_tst = np.std(faces_test, axis=0)
+ faces_train = normalise_faces(deviations_tr, average_face, faces_train)
+ faces_test = normalise_faces(deviations_tr, average_face, faces_test)
if (args.pca_r):
e_vals, e_vecs = LA.eigh(np.dot(faces_train, faces_train.T))
e_vecs = np.dot(faces_train.T, e_vecs)
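Review note: the pca_r branch uses the low-dimensional ("snapshot") trick: it eigendecomposes the small N x N matrix faces_train.dot(faces_train.T) instead of the 2576 x 2576 covariance, then maps the eigenvectors back into pixel space through faces_train.T. A rough sketch of the idea, with illustrative names and sizes rather than the script's own:

    import numpy as np
    from numpy import linalg as LA

    X = np.random.rand(10, 2576)        # N = 10 centred faces, D = 2576 pixels

    # eigenvectors of the small N x N Gram matrix
    vals, small_vecs = LA.eigh(X.dot(X.T))

    # map back to pixel space: these span the same subspace as the leading
    # eigenvectors of the D x D covariance matrix
    e_vecs = X.T.dot(small_vecs)        # shape (D, N)
    e_vecs /= LA.norm(e_vecs, axis=0)   # renormalise each eigenface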
@@ -107,13 +113,10 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
e_vals = np.flip(e_vals)[:M]
e_vecs = np.fliplr(e_vecs).T[:M]
deviations_tr = np.flip(deviations_tr)
- deviations_tst = np.flip(deviations_tst)
+ # deviations_tst = np.flip(deviations_tst)
faces_train = np.dot(faces_train, e_vecs.T)
faces_test = np.dot(faces_test, e_vecs.T)
- distances = np.zeros(faces_test.shape[0])
-
-
rec_vecs = np.add(np.tile(average_face, (faces_test.shape[0], 1)), np.dot(faces_test, e_vecs) * deviations_tr)
distances = LA.norm(raw_faces_test - rec_vecs, axis=1)
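Review note: the reconstruction undoes the normalisation (multiply by deviations_tr, add average_face) before comparing against the raw test images, and the per-image L2 error goes into distances. In outline, with toy shapes and illustrative names:

    import numpy as np
    from numpy import linalg as LA

    M, D = 50, 2576
    proj = np.random.rand(4, M)         # test faces projected onto M eigenfaces
    e_vecs = np.random.rand(M, D)       # eigenfaces, one per row
    mean_face = np.zeros(D)
    train_std = np.ones(D)
    raw_test = np.random.rand(4, D)

    # reverse the projection and the normalisation, then measure the error
    rec = mean_face + proj.dot(e_vecs) * train_std
    distances = LA.norm(raw_test - rec, axis=1)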
@@ -127,7 +130,7 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
if args.lda:
if args.pca_r or (args.pca and M > n_training_faces - n_faces):
- lda = LinearDiscriminantAnalysis(n_components=M, solver='eigen')
+ lda = LinearDiscriminantAnalysis(n_components=M, solver='svd')
else:
lda = LinearDiscriminantAnalysis(n_components=M, store_covariance=True)
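Review note: switching to solver='svd' here sidesteps the failure mode of solver='eigen' when the within-class scatter matrix is singular (for example after a PCA projection that keeps more dimensions than there are training samples minus classes), because the SVD solver never explicitly inverts that matrix. A minimal usage sketch on toy data; the names and sizes are illustrative only:

    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    X = np.random.rand(60, 100)          # 60 samples, 100 features
    y = np.repeat(np.arange(6), 10)      # 6 classes, 10 samples each

    # the SVD solver copes with rank-deficient scatter matrices because it
    # does not invert the within-class scatter
    lda = LinearDiscriminantAnalysis(n_components=5, solver='svd')
    X_lda = lda.fit_transform(X, y)      # at most n_classes - 1 components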
@@ -136,7 +139,7 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
class_means = lda.means_
e_vals = lda.explained_variance_ratio_
scatter_matrix = lda.covariance_
- print(LA.matrix_rank(scatter_matrix))
+ print("Rank of scatter:", LA.matrix_rank(scatter_matrix))
if args.faces:
if args.lda:
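Review note: printing the rank of lda.covariance_ is a useful sanity check, since a within-class scatter built from N training vectors in c classes has rank at most N - c; a full-rank result confirms the preceding PCA kept few enough dimensions. A tiny illustration of the rank check, unrelated to the script's data:

    import numpy as np
    from numpy import linalg as LA

    S = np.random.rand(100, 5)
    scatter = S.dot(S.T)                 # 100 x 100 matrix built from 5 vectors
    print("Rank of scatter:", LA.matrix_rank(scatter))   # prints 5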
@@ -207,6 +210,9 @@ def main():
parser.add_argument("-alt", "--classifyalt", help="Alternative method ON", action='store_true')
args = parser.parse_args()
+ if args.lda and args.classifyalt:
+ sys.exit("LDA and Alt PCA can not be performed together")
+
raw_faces = genfromtxt(args.data, delimiter=',')
targets = np.repeat(np.arange(n_faces),n_cases)
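Review note: the new guard calls sys.exit, which assumes import sys appears at the top of train.py (not visible in this hunk). An equivalent way to reject the combination, using argparse's own error handling on the same parser object, would be:

    # equivalent guard using argparse's error handling instead of sys.exit
    if args.lda and args.classifyalt:
        parser.error("LDA and the alternative PCA method cannot be combined")

parser.error prints the usage string and exits with a non-zero status, which keeps the failure message next to the option definitions.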