diff options
-rwxr-xr-x | train.py | 37 |
1 file changed, 22 insertions, 15 deletions
@@ -38,9 +38,9 @@ n_cases = 10 n_pixels = 2576 # subtract the normal face from each row of the face matrix -def normalise_faces(average_face, faces): +def normalise_faces(deviations_tr, average_face, faces): faces = np.subtract(faces, np.tile(average_face, (faces.shape[0],1))) - return np.divide(faces.T, np.std(faces.T, axis=0)).T + return np.divide(faces, deviations_tr) # Split data into training and testing sets def test_split(n_faces, raw_faces, split, seed): @@ -82,16 +82,22 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args): explained_variances = () + + distances = np.zeros(faces_test.shape[0]) + if args.pca or args.pca_r: # faces_pca containcts the principial components or the M most variant eigenvectors average_face = np.mean(faces_train, axis=0) #PLOTTING MEAN FACE #plt.imshow(average_face.reshape([46,56]).T, cmap = 'gist_gray') plt.show() - deviations_tr = np.std(faces_train, axis=0) - deviations_tst = np.std(faces_train, axis=0) - faces_train = normalise_faces(average_face, faces_train) - faces_test = normalise_faces(average_face, faces_test) + if args.classifyalt: + deviations_tr = np.ones(n_pixels) + else: + deviations_tr = np.std(faces_train, axis=0) + # deviations_tst = np.std(faces_test, axis=0) + faces_train = normalise_faces(deviations_tr, average_face, faces_train) + faces_test = normalise_faces(deviations_tr, average_face, faces_test) if (args.pca_r): e_vals, e_vecs = LA.eigh(np.dot(faces_train, faces_train.T)) e_vecs = np.dot(faces_train.T, e_vecs) @@ -107,14 +113,12 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args): e_vals = np.flip(e_vals)[:M] e_vecs = np.fliplr(e_vecs).T[:M] deviations_tr = np.flip(deviations_tr) - deviations_tst = np.flip(deviations_tst) + # deviations_tst = np.flip(deviations_tst) faces_train = np.dot(faces_train, e_vecs.T) faces_test = np.dot(faces_test, e_vecs.T) - distances = np.zeros(faces_test.shape[0]) - for i in range(faces_test.shape[0]): - norm = 
LA.norm(faces_train - np.tile(faces_test[i], (faces_train.shape[0], 1)), axis=1) - distances[i] = np.amin(norm) + rec_vecs = np.add(np.tile(average_face, (faces_test.shape[0], 1)), np.dot(faces_test, e_vecs) * deviations_tr) + distances = LA.norm(raw_faces_test - rec_vecs, axis=1); if args.reconstruct: rec_vec = np.add(average_face, np.dot(faces_train[args.reconstruct], e_vecs) * deviations_tr) @@ -126,7 +130,7 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args): if args.lda: if args.pca_r or (args.pca and M > n_training_faces - n_faces): - lda = LinearDiscriminantAnalysis(n_components=M, solver='eigen') + lda = LinearDiscriminantAnalysis(n_components=M, solver='svd') else: lda = LinearDiscriminantAnalysis(n_components=M, store_covariance='True') @@ -135,7 +139,7 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args): class_means = lda.means_ e_vals = lda.explained_variance_ratio_ scatter_matrix = lda.covariance_ - print(LA.matrix_rank(scatter_matrix)) + print("Rank of scatter:", LA.matrix_rank(scatter_matrix)) if args.faces: if args.lda: @@ -206,14 +210,17 @@ def main(): parser.add_argument("-alt", "--classifyalt", help="Alternative method ON", action='store_true') args = parser.parse_args() + if args.lda and args.classifyalt: + sys.exit("LDA and Alt PCA can not be performed together") + raw_faces = genfromtxt(args.data, delimiter=',') targets = np.repeat(np.arange(n_faces),n_cases) faces_train, faces_test, target_train, target_test = test_split(n_faces, raw_faces, args.split, args.seed) if args.classifyalt: - faces_train = faces_train.reshape(n_faces, 8, n_pixels) - target_train = target_train.reshape(n_faces, 8) + faces_train = faces_train.reshape(n_faces, int(faces_train.shape[0]/n_faces), n_pixels) + target_train = target_train.reshape(n_faces, int(target_train.shape[0]/n_faces)) accuracy = np.zeros(n_faces) distances = np.zeros((n_faces, faces_test.shape[0])) |