aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVasil Zlatanov <v@skozl.com>2019-02-11 17:47:09 +0000
committerVasil Zlatanov <v@skozl.com>2019-02-11 17:47:09 +0000
commit97acdd6ea9e378c90cf9a199e746ebca59a4d5e6 (patch)
tree3ac6578f5940c4b70ed9c0627a8c0da9b0c7f3d2
parent586909e40569cd32f398d4360ba67876da10887f (diff)
downloade4-vision-97acdd6ea9e378c90cf9a199e746ebca59a4d5e6.tar.gz
e4-vision-97acdd6ea9e378c90cf9a199e746ebca59a4d5e6.tar.bz2
e4-vision-97acdd6ea9e378c90cf9a199e746ebca59a4d5e6.zip
Add histogram fig
-rwxr-xr-xevaluate.py7
-rw-r--r--report/fig/km-histogram.pdfbin0 -> 13076 bytes
-rw-r--r--report/paper.md23
3 files changed, 18 insertions, 12 deletions
diff --git a/evaluate.py b/evaluate.py
index dff8482..9cb5f78 100755
--- a/evaluate.py
+++ b/evaluate.py
@@ -19,7 +19,7 @@ import time
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data", help="Data path", action='store_true', default='data.npz')
parser.add_argument("-c", "--conf_mat", help="Show visual confusion matrix", action='store_true')
-parser.add_argument("-k", "--kmean", help="Perform kmean clustering with --kmean cluster centers", type=int, default=0)
+parser.add_argument("-k", "--kmean", help="Perform kmean clustering with KMEAN cluster centers", type=int, default=0)
parser.add_argument("-l", "--leaves", help="Maximum leaf nodes for RF classifier", type=int, default=256)
parser.add_argument("-e", "--estimators", help="number of estimators to be used", type=int, default=100)
parser.add_argument("-D", "--treedepth", help="depth of trees", type=int, default=5)
@@ -49,6 +49,11 @@ def make_histogram(data, model, args):
leaves = model.apply(data[i][j].T)
leaves = np.apply_along_axis(np.bincount, axis=0, arr=leaves, minlength=args.leaves)
histogram[i][j] = leaves.reshape(hist_size)
+
+ print(histogram[0][0].shape)
+ plt.bar(np.arange(100), histogram[0][0].flatten())
+ plt.show()
+
return histogram
def run_model (data, train, test, train_part, args):
diff --git a/report/fig/km-histogram.pdf b/report/fig/km-histogram.pdf
new file mode 100644
index 0000000..f459978
--- /dev/null
+++ b/report/fig/km-histogram.pdf
Binary files differ
diff --git a/report/paper.md b/report/paper.md
index 037d0df..d8e4fca 100644
--- a/report/paper.md
+++ b/report/paper.md
@@ -1,17 +1,18 @@
-# K-means codebook
-
-We randomly select 100k descriptors for K-means clustering for building the visual vocabulary
-(due to memory issue). Open the main_guideline.m and select/load the dataset.
-```
-[data_train, data_test] = getData('Caltech');
-```
-Set 'showImg = 0' in getData.m if you want to stop displaying training and testing images.
-Complete getData.m by writing your own lines of code to obtain the visual vocabulary and the
-bag-of-words histograms for both training and testing data. Show, measure and
-discuss the followings:
+# Codebooks
+
+## K-means codebook
+
+A common technique for codebook generation involves utilising K-means clustering on a sample of the
+image descriptors. In this way descriptors may be mapped to *visual* words which lend themselves to
+binning and therefore the creation of bag-of-words histograms for the use of classification.
+
+In this coursework 100-thousand descriptors have been selected to build the visual vocabulary from the
+Caltech dataset.
## Vocabulary size
+The number of clusters or the number of centroids determines the vocabulary size.
+
## Bag-of-words histograms of example training/testing images
## Vector quantisation process