| field | value | date |
|---|---|---|
| author | Vasil Zlatanov <vasil@netcraft.com> | 2018-11-14 23:03:37 +0000 |
| committer | Vasil Zlatanov <vasil@netcraft.com> | 2018-11-14 23:03:37 +0000 |
| commit | 948eca906f80a06e7386e1c7a31e3678178f82ad (patch) | |
| tree | 8ed7449565097fe73178b2c52105c02630106f9b | |
| parent | 3a7121ad1800dbb569acbe8af37974a2d6114b0f (diff) | |
Add randomisation in feature space
| mode | file | lines changed |
|---|---|---|
| -rwxr-xr-x | train.py | 52 |

1 file changed, 34 insertions(+), 18 deletions(-)
```diff
diff --git a/train.py b/train.py
--- a/train.py
+++ b/train.py
@@ -109,8 +109,18 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
     # plt.semilogy(range(2576), np.absolute(416*np.flip(e_vals)))
     # plt.show()
-    e_vals = np.flip(e_vals)[:M]
-    e_vecs = np.fliplr(e_vecs).T[:M]
+    e_vals = np.flip(e_vals)
+    e_vecs = np.fliplr(e_vecs).T
+
+    if args.random:
+        random_features = random.sample(range(M-args.random, M), args.random)
+        for i in range(args.random):
+            e_vals[M-i] = e_vals[random_features[i]]
+            e_vecs[M-i] = e_vecs[random_features[i]]
+
+    e_vals = e_vals[:M]
+    e_vecs = e_vecs[:M]
+
     deviations_tr = np.flip(deviations_tr)
     # deviations_tst = np.flip(deviations_tst)
     faces_train = np.dot(faces_train, e_vecs.T)
@@ -129,7 +139,7 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
     if args.lda:
         if args.pca_r or (args.pca and M > n_training_faces - n_faces):
-            lda = LinearDiscriminantAnalysis(n_components=M, solver='eigen')
+            lda = LinearDiscriminantAnalysis(n_components=M, solver='svd')
         else:
             lda = LinearDiscriminantAnalysis(n_components=M, store_covariance='True')
@@ -137,8 +147,7 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
         faces_test = lda.transform(faces_test)
         class_means = lda.means_
         e_vals = lda.explained_variance_ratio_
-        scatter_matrix = lda.covariance_
-        print("Rank of scatter:", LA.matrix_rank(scatter_matrix))
+        # scatter_matrix = lda.covariance_; print("Rank of scatter:", LA.matrix_rank(scatter_matrix))

     if args.faces:
         if args.lda:
@@ -191,7 +200,9 @@ def main():
     parser.add_argument("-i", "--data", help="Input CSV file", required=True)
     parser.add_argument("-m", "--eigen", help="Number of eigenvalues in model", type=int, default = 10 )
     parser.add_argument("-M", "--reigen", help="Number of eigenvalues in model", type=int)
-    parser.add_argument("-b", "--bagging", help="Number of bagging baggings to use", type=int)
+    parser.add_argument("-e", "--ensemble", help="Number of ensemmbles to use", type=int, default = 0)
+    parser.add_argument("-b", "--bagging", help="Number of bags to use", action='store_true')
+    parser.add_argument("-R", "--random", help="Number of eigen value to randomise", type=int)
     parser.add_argument("-n", "--neighbors", help="How many neighbors to use", type=int, default = 1) ##USING STANDARD 1 FOR NN ACCURACY
     parser.add_argument("-f", "--faces", help="Show faces", type=int, default = 0)
@@ -218,14 +229,18 @@ def main():
     faces_train, faces_test, target_train, target_test = test_split(n_faces, raw_faces, args.split, args.seed)

-    if args.bagging:
+    if args.ensemble:
         n_training_faces = int(round(n_cases*(1 - args.split)))
-        faces_train_bagged = np.zeros((args.bagging, n_faces, n_training_faces, n_pixels))
-        for x in range(args.bagging):
-            for k in range(n_faces):
-                samples = random.choices(range(n_training_faces), k=n_training_faces)
-                faces_train_bagged[x][k] = [faces_train[i+n_training_faces*k] for i in samples]
-        faces_train_bagged = faces_train_bagged.reshape(args.bagging, n_faces*n_training_faces, n_pixels)
+        faces_train_ens = np.zeros((args.ensemble, n_faces, n_training_faces, n_pixels))
+        for x in range(args.ensemble):
+            if args.bagging:
+                for k in range(n_faces):
+                    samples = random.choices(range(n_training_faces), k=n_training_faces)
+                    faces_train_ens[x][k] = [faces_train[i+n_training_faces*k] for i in samples]
+            else:
+                faces_train_ens[x] = faces_train.reshape((n_faces, n_training_faces, n_pixels))
+
+        faces_train_ens = faces_train_ens.reshape(args.ensemble, n_faces*n_training_faces, n_pixels)

     if args.classifyalt:
         faces_train = faces_train.reshape(n_faces, int(faces_train.shape[0]/n_faces), n_pixels)
@@ -254,11 +269,11 @@ def main():
         plt.ylabel('Recognition Accuracy (%)')
         plt.grid(True)
         plt.show()
-    elif args.bagging:
-        rec_error = np.zeros((args.bagging, n_faces, faces_test.shape[0]))
-        target_pred = np.zeros((args.bagging, target_test.shape[0]))
-        for i in range(args.bagging):
-            target_pred[i], rec_error[i] = test_model(args.eigen, faces_train_bagged[i], faces_test, target_train, target_test, args)
+    elif args.ensemble:
+        rec_error = np.zeros((args.ensemble, n_faces, faces_test.shape[0]))
+        target_pred = np.zeros((args.ensemble, target_test.shape[0]))
+        for i in range(args.ensemble):
+            target_pred[i], rec_error[i] = test_model(args.eigen, faces_train_ens[i], faces_test, target_train, target_test, args)

         target_pred_comb = np.zeros(target_pred.shape[1])
         target_pred = target_pred.astype(int).T
@@ -270,6 +285,7 @@ def main():
         start = timer()
         target_pred, distances = test_model(M, faces_train, faces_test, target_train, target_test, args)
         end = timer()
+        draw_results(args, target_test, target_pred)

 if __name__ == "__main__":
```
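The first hunk is the "randomisation in feature space" the commit message refers to: the tail of the sorted top-M eigenvalue/eigenvector set is overwritten using randomly sampled indices before the subspace is kept. The sketch below shows a common variant of that idea (keep the strongest M - R components and draw the remaining R at random from a larger pool). It is a minimal stand-alone example under assumed inputs, not a drop-in for the repository's train.py; `build_random_subspace`, `M`, `R` and `seed` are illustrative names.

```python
import numpy as np

def build_random_subspace(e_vals, e_vecs, M, R, seed=None):
    """Return M eigenvalue/eigenvector pairs: the top M - R kept deterministically,
    plus R drawn at random from the remaining pool of components.

    e_vals : (D,) eigenvalues sorted in descending order
    e_vecs : (D, P) matching eigenvectors, one per row
    """
    rng = np.random.default_rng(seed)
    fixed = np.arange(M - R)                               # strongest components, always kept
    pool = np.arange(M - R, len(e_vals))                   # candidates for the random slots
    random_pick = rng.choice(pool, size=R, replace=False)  # sample without replacement
    keep = np.concatenate([fixed, random_pick])
    return e_vals[keep], e_vecs[keep]

# Example projection onto the randomised subspace, mirroring the
# np.dot(faces_train, e_vecs.T) step in the diff (values are illustrative):
# e_vals_sel, e_vecs_sel = build_random_subspace(e_vals, e_vecs, M=100, R=20)
# faces_train_proj = faces_train @ e_vecs_sel.T
```

Drawing a different random subset per ensemble member gives each model a slightly different PCA subspace, which is what makes combining their predictions worthwhile.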
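The main() hunks generalise the old --bagging path into an --ensemble loop: each ensemble member gets its own training set, and only when --bagging is set are each class's training images resampled with replacement. Below is a stand-alone sketch of that per-class bootstrap under assumed array shapes; `make_ensemble_sets` and its parameters are illustrative names, not part of train.py.

```python
import numpy as np

def make_ensemble_sets(faces_train, n_classes, n_per_class, n_members,
                       bagging=True, seed=None):
    """Build one training set per ensemble member.

    faces_train : (n_classes * n_per_class, n_pixels) array, grouped by class.
    With bagging=True each class's images are bootstrap-resampled (sampled
    with replacement); otherwise every member reuses the original data.
    """
    rng = np.random.default_rng(seed)
    n_pixels = faces_train.shape[1]
    grouped = faces_train.reshape(n_classes, n_per_class, n_pixels)
    members = np.empty((n_members, n_classes, n_per_class, n_pixels))
    for m in range(n_members):
        if bagging:
            for c in range(n_classes):
                idx = rng.integers(0, n_per_class, size=n_per_class)  # with replacement
                members[m, c] = grouped[c, idx]
        else:
            members[m] = grouped
    return members.reshape(n_members, n_classes * n_per_class, n_pixels)
```

Each member's set would then be passed to the model separately, matching the loop over faces_train_ens[i] in the diff before the per-member predictions are combined.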