http://git-wip-us.apache.org/repos/asf/madlib-site/blob/af0e5f14/docs/v1.15.1/group__grp__kmeans.html
----------------------------------------------------------------------
diff --git a/docs/v1.15.1/group__grp__kmeans.html 
b/docs/v1.15.1/group__grp__kmeans.html
new file mode 100644
index 0000000..4fb7dea
--- /dev/null
+++ b/docs/v1.15.1/group__grp__kmeans.html
@@ -0,0 +1,492 @@
+<div class="header">
+  <div class="headertitle">
+<div class="title">k-Means Clustering<div class="ingroups"><a class="el" 
href="group__grp__unsupervised.html">Unsupervised Learning</a> &raquo; <a 
class="el" href="group__grp__clustering.html">Clustering</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> <ul>
+<li class="level1">
+<a href="#train">Training Function</a> </li>
+<li class="level1">
+<a href="#output">Output Format</a> </li>
+<li class="level1">
+<a href="#assignment">Cluster Assignment</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#notes">Notes</a> </li>
+<li class="level1">
+<a href="#background">Technical Background</a> </li>
+<li class="level1">
+<a href="#literature">Literature</a> </li>
+<li class="level1">
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><p>Clustering refers to the problem of partitioning a set of objects 
according to some problem-dependent measure of <em>similarity</em>. In the 
k-means variant, given \( n \) points \( x_1, \dots, x_n \in \mathbb R^d \), 
the goal is to position \( k \) centroids \( c_1, \dots, c_k \in \mathbb R^d \) 
so that the sum of <em>distances</em> between each point and its closest 
centroid is minimized. Each centroid represents a cluster that consists of all 
points to which this centroid is closest.</p>
+<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training 
Function</dt><dd></dd></dl>
+<p>The k-means algorithm can be invoked in four ways, depending on the source 
of the initial set of centroids:</p>
+<ul>
+<li>Use the random centroid seeding method. <pre class="syntax">
+kmeans_random( rel_source,
+               expr_point,
+               k,
+               fn_dist,
+               agg_centroid,
+               max_num_iterations,
+               min_frac_reassigned
+             )
+</pre></li>
+<li>Use the kmeans++ centroid seeding method. <pre class="syntax">
+kmeanspp( rel_source,
+          expr_point,
+          k,
+          fn_dist,
+          agg_centroid,
+          max_num_iterations,
+          min_frac_reassigned,
+          seeding_sample_ratio
+        )
+</pre></li>
+<li>Supply an initial centroid set in a relation identified by the 
<em>rel_initial_centroids</em> argument. <pre class="syntax">
+kmeans( rel_source,
+        expr_point,
+        rel_initial_centroids,
+        expr_centroid,
+        fn_dist,
+        agg_centroid,
+        max_num_iterations,
+        min_frac_reassigned
+      )
+</pre></li>
+<li>Provide an initial centroid set as an array expression in the 
<em>initial_centroids</em> argument. <pre class="syntax">
+kmeans( rel_source,
+        expr_point,
+        initial_centroids,
+        fn_dist,
+        agg_centroid,
+        max_num_iterations,
+        min_frac_reassigned
+      )
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>rel_source </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the input data 
points.</p>
+<p>Data points and predefined centroids (if used) are expected to be stored 
row-wise, in a column of type <code><a class="el" 
href="group__grp__svec.html">SVEC</a></code> (or any type convertible to 
<code><a class="el" href="group__grp__svec.html">SVEC</a></code>, like 
<code>FLOAT[]</code> or <code>INTEGER[]</code>). Data points with non-finite 
values (NULL, NaN, infinity) in any component are skipped during analysis. </p>
+<p class="enddd"></p>
+</dd>
+<dt>expr_point </dt>
+<dd><p class="startdd">TEXT. The name of the column with point coordinates or 
an array expression.</p>
+<p class="enddd"></p>
+</dd>
+<dt>k </dt>
+<dd><p class="startdd">INTEGER. The number of centroids to calculate.</p>
+<p class="enddd"></p>
+</dd>
+<dt>fn_dist (optional) </dt>
+<dd><p class="startdd">TEXT, default: squared_dist_norm2'. The name of the 
function to use to calculate the distance from a data point to a centroid.</p>
+<p>The following distance functions can be used (computation of 
barycenter/mean in parentheses): </p><ul>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#aad193850e79c4b9d811ca9bc53e13476">dist_norm1</a></b>:
 1-norm/Manhattan (element-wise median [Note that MADlib does not provide a 
median aggregate function for support and performance reasons.]) </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#aa58e51526edea6ea98db30b6f250adb4">dist_norm2</a></b>:
 2-norm/Euclidean (element-wise mean) </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#a00a08e69f27524f2096032214e15b668">squared_dist_norm2</a></b>:
 squared Euclidean distance (element-wise mean) </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#a8c7b9281a72ff22caf06161701b27e84">dist_angle</a></b>:
 angle (element-wise mean of normalized points) </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#afa13b4c6122b99422d666dedea136c18">dist_tanimoto</a></b>:
 tanimoto (element-wise mean of normalized points <a 
href="#kmeans-lit-5">[5]</a>) </li>
+<li>
+<b>user defined function</b> with signature <code>DOUBLE PRECISION[] x, DOUBLE 
PRECISION[] y -&gt; DOUBLE PRECISION</code> (see the sketch after the argument 
list below)</li>
+</ul>
+<p class="enddd"></p>
+</dd>
+<dt>agg_centroid (optional) </dt>
+<dd><p class="startdd">TEXT, default: 'avg'. The name of the aggregate 
function used to determine centroids.</p>
+<p>The following aggregate functions can be used:</p><ul>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#a1aa37f73fb1cd8d7d106aa518dd8c0b4">avg</a></b>: 
average (Default) </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#a0b04663ca206f03e66aed5ea2b4cc461">normalized_avg</a></b>:
 normalized average</li>
+</ul>
+<p class="enddd"></p>
+</dd>
+<dt>max_num_iterations (optional) </dt>
+<dd><p class="startdd">INTEGER, default: 20. The maximum number of iterations 
to perform.</p>
+<p class="enddd"></p>
+</dd>
+<dt>min_frac_reassigned (optional) </dt>
+<dd><p class="startdd">DOUBLE PRECISION, default: 0.001. The minimum fraction 
of centroids reassigned to continue iterating. When fewer than this fraction of 
centroids are reassigned in an iteration, the calculation completes.</p>
+<p class="enddd"></p>
+</dd>
+<dt>seeding_sample_ratio (optional) </dt>
+<dd><p class="startdd">DOUBLE PRECISION, default: 1.0. The proportion of 
subsample of original dataset to use for kmeans++ centroid seeding method. 
Kmeans++ scans through the data sequentially 'k' times and can be too slow for 
big datasets. When 'seeding_sample_ratio' is greater than 0 (thresholded to be 
maximum value of 1.0), the seeding is run on an uniform random subsample of the 
data. Note: the final K-means algorithm is run on the complete dataset. This 
parameter only builds a subsample for the seeding and is only available for 
kmeans++.</p>
+<p class="enddd"></p>
+</dd>
+<dt>rel_initial_centroids </dt>
+<dd><p class="startdd">TEXT. The set of initial centroids. </p>
+<p class="enddd"></p>
+</dd>
+<dt>expr_centroid </dt>
+<dd><p class="startdd">TEXT. The name of the column (or the array expression) 
in the <em>rel_initial_centroids</em> relation that contains the centroid 
coordinates.</p>
+<p class="enddd"></p>
+</dd>
+<dt>initial_centroids </dt>
+<dd>TEXT. A string containing a DOUBLE PRECISION array expression with the 
initial centroid coordinates. </dd>
+</dl>
+</li>
+</ul>
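+<p>As referenced in the <em>fn_dist</em> argument above, a user-defined 
distance function only needs to match the stated signature. Below is a minimal 
sketch; the function name <code>dist_chebyshev</code> and the choice of the 
Chebyshev (maximum) distance are illustrative and not part of MADlib: </p><pre class="example">
+CREATE OR REPLACE FUNCTION dist_chebyshev(x DOUBLE PRECISION[],
+                                          y DOUBLE PRECISION[])
+RETURNS DOUBLE PRECISION AS $$
+    -- Chebyshev distance: the largest element-wise absolute difference
+    SELECT max(abs(xi - yi))
+    FROM unnest(x, y) AS t(xi, yi);
+$$ LANGUAGE sql IMMUTABLE STRICT;
+</pre><p>Note that neither built-in centroid aggregate computes the true 
barycenter for this distance; the element-wise mean ('avg') is a common 
approximation.</p>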
+<p><a class="anchor" id="output"></a></p><dl class="section user"><dt>Output 
Format</dt><dd></dd></dl>
+<p>The output of the k-means module is a composite type with the following 
columns: </p><table class="output">
+<tr>
+<th>centroids </th><td>DOUBLE PRECISION[][]. The final centroid positions.  
</td></tr>
+<tr>
+<th>cluster_variance </th><td>DOUBLE PRECISION[]. The value of the objective 
function per cluster.  </td></tr>
+<tr>
+<th>objective_fn </th><td>DOUBLE PRECISION. The value of the objective 
function.  </td></tr>
+<tr>
+<th>frac_reassigned </th><td>DOUBLE PRECISION. The fraction of points 
reassigned in the last iteration.  </td></tr>
+<tr>
+<th>num_iterations </th><td>INTEGER. The total number of iterations executed.  
</td></tr>
+</table>
+<p><a class="anchor" id="assignment"></a></p><dl class="section 
user"><dt>Cluster Assignment</dt><dd></dd></dl>
+<p>After training, the cluster assignment for each data point can be computed 
with the help of the following function:</p>
+<pre class="syntax">
+closest_column( m, x )
+</pre><p><b>Arguments</b> </p><dl class="arglist">
+<dt>m </dt>
+<dd>DOUBLE PRECISION[][]. The learned centroids from the training function. 
</dd>
+<dt>x </dt>
+<dd>DOUBLE PRECISION[]. The data point. </dd>
+</dl>
+<p><b>Output format</b> </p><table class="output">
+<tr>
+<th>column_id </th><td>INTEGER. The cluster assignment (zero-based). </td></tr>
+<tr>
+<th>distance </th><td>DOUBLE PRECISION. The distance to the cluster centroid. 
</td></tr>
+</table>
+<p><a class="anchor" id="examples"></a></p><dl class="section 
user"><dt>Examples</dt><dd></dd></dl>
+<p>Note: Your results may not exactly match those below, due to the random 
nature of centroid seeding in the k-means algorithm.</p>
+<ol type="1">
+<li>Prepare some input data: <pre class="example">
+DROP TABLE IF EXISTS km_sample;
+CREATE TABLE km_sample(pid int, points double precision[]);
+INSERT INTO km_sample VALUES
+(1,  '{14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.0600, 0.2800, 2.29, 5.64, 1.04, 
3.92, 1065}'),
+(2,  '{13.2, 1.78, 2.14, 11.2, 1, 2.65, 2.76, 0.26, 1.28, 4.38, 1.05, 3.49, 
1050}'),
+(3,  '{13.16, 2.36,  2.67, 18.6, 101, 2.8,  3.24, 0.3, 2.81, 5.6799, 1.03, 
3.17, 1185}'),
+(4,  '{14.37, 1.95, 2.5, 16.8, 113, 3.85, 3.49, 0.24, 2.18, 7.8, 0.86, 3.45, 
1480}'),
+(5,  '{13.24, 2.59, 2.87, 21, 118, 2.8, 2.69, 0.39, 1.82, 4.32, 1.04, 2.93, 
735}'),
+(6,  '{14.2, 1.76, 2.45, 15.2, 112, 3.27, 3.39, 0.34, 1.97, 6.75, 1.05, 2.85, 
1450}'),
+(7,  '{14.39, 1.87, 2.45, 14.6, 96, 2.5, 2.52, 0.3, 1.98, 5.25, 1.02, 3.58, 
1290}'),
+(8,  '{14.06, 2.15, 2.61, 17.6, 121, 2.6, 2.51, 0.31, 1.25, 5.05, 1.06, 3.58, 
1295}'),
+(9,  '{14.83, 1.64, 2.17, 14, 97, 2.8, 2.98, 0.29, 1.98, 5.2, 1.08, 2.85, 
1045}'),
+(10, '{13.86, 1.35, 2.27, 16, 98, 2.98, 3.15, 0.22, 1.8500, 7.2199, 1.01, 
3.55, 1045}');
+</pre></li>
+<li>Run k-means clustering using kmeans++ for centroid seeding: <pre 
class="example">
+DROP TABLE IF EXISTS km_result;
+-- Run kmeans algorithm
+CREATE TABLE km_result AS
+SELECT * FROM madlib.kmeanspp('km_sample', 'points', 2,
+                           'madlib.squared_dist_norm2',
+                           'madlib.avg', 20, 0.001);
+\x on
+SELECT * FROM km_result;
+</pre> Result: <pre class="result">
+centroids        | 
{{14.036,2.018,2.536,16.56,108.6,3.004,3.03,0.298,2.038,6.10598,1.004,3.326,1340},{13.872,1.814,2.376,15.56,88.2,2.806,2.928,0.288,1.844,5.35198,1.044,3.348,988}}
+cluster_variance | {60672.638245208,90512.324426408}
+objective_fn     | 151184.962671616
+frac_reassigned  | 0
+num_iterations   | 2
+</pre></li>
+<li>Calculate the simplified silhouette coefficient: <pre class="example">
+SELECT * FROM madlib.simple_silhouette( 'km_sample',
+                                        'points',
+                                        (SELECT centroids FROM km_result),
+                                        'madlib.dist_norm2'
+                                      );
+</pre> Result: <pre class="result">
+simple_silhouette | 0.68978804882941
+</pre></li>
+<li>Find the cluster assignment for each point: <pre class="example">
+\x off
+-- Get point assignment
+SELECT data.*,  (madlib.closest_column(centroids, points)).column_id as 
cluster_id
+FROM km_sample as data, km_result
+ORDER BY data.pid;
+</pre> Result: <pre class="result">
+ pid |                               points                               | 
cluster_id
+-----+--------------------------------------------------------------------+------------
+   1 | {14.23,1.71,2.43,15.6,127,2.8,3.06,0.28,2.29,5.64,1.04,3.92,1065}  |    
      1
+   2 | {13.2,1.78,2.14,11.2,1,2.65,2.76,0.26,1.28,4.38,1.05,3.49,1050}    |    
      1
+   3 | {13.16,2.36,2.67,18.6,101,2.8,3.24,0.3,2.81,5.6799,1.03,3.17,1185} |    
      0
+   4 | {14.37,1.95,2.5,16.8,113,3.85,3.49,0.24,2.18,7.8,0.86,3.45,1480}   |    
      0
+   5 | {13.24,2.59,2.87,21,118,2.8,2.69,0.39,1.82,4.32,1.04,2.93,735}     |    
      1
+   6 | {14.2,1.76,2.45,15.2,112,3.27,3.39,0.34,1.97,6.75,1.05,2.85,1450}  |    
      0
+   7 | {14.39,1.87,2.45,14.6,96,2.5,2.52,0.3,1.98,5.25,1.02,3.58,1290}    |    
      0
+   8 | {14.06,2.15,2.61,17.6,121,2.6,2.51,0.31,1.25,5.05,1.06,3.58,1295}  |    
      0
+   9 | {14.83,1.64,2.17,14,97,2.8,2.98,0.29,1.98,5.2,1.08,2.85,1045}      |    
      1
+  10 | {13.86,1.35,2.27,16,98,2.98,3.15,0.22,1.85,7.2199,1.01,3.55,1045}  |    
      1
+(10 rows)
+</pre></li>
+<li>Unnest the cluster centroids 2-D array to get a set of 1-D centroid 
arrays: <pre class="example">
+DROP TABLE IF EXISTS km_centroids_unnest;
+-- Run unnest function
+CREATE TABLE km_centroids_unnest AS
+SELECT (madlib.array_unnest_2d_to_1d(centroids)).*
+FROM km_result;
+SELECT * FROM km_centroids_unnest ORDER BY 1;
+</pre> Result: <pre class="result">
+ unnest_row_id |                                  unnest_result
+---------------+----------------------------------------------------------------------------------
+             1 | 
{14.036,2.018,2.536,16.56,108.6,3.004,3.03,0.298,2.038,6.10598,1.004,3.326,1340}
+             2 | 
{13.872,1.814,2.376,15.56,88.2,2.806,2.928,0.288,1.844,5.35198,1.044,3.348,988}
+(2 rows)
+</pre> Note that the ID column returned by <a class="el" 
href="array__ops_8sql__in.html#af057b589f2a2cb1095caa99feaeb3d70" title="This 
function takes a 2-D array as the input and unnests it by one level. It returns 
a set of 1-D arr...">array_unnest_2d_to_1d()</a> is not guaranteed to be the 
same as the cluster ID assigned by k-means. See below to create the correct 
cluster IDs.</li>
+<li>Create cluster IDs for the 1-D centroid arrays, so that the cluster ID of 
any centroid can be matched to the cluster assignments of the data points: <pre 
class="example">
+SELECT cent.*,  (madlib.closest_column(centroids, unnest_result)).column_id as 
cluster_id
+FROM km_centroids_unnest as cent, km_result
+ORDER BY cent.unnest_row_id;
+</pre> Result: <pre class="result">
+ unnest_row_id |                                  unnest_result                
                   | cluster_id
+---------------+----------------------------------------------------------------------------------+------------
+             1 | 
{14.036,2.018,2.536,16.56,108.6,3.004,3.03,0.298,2.038,6.10598,1.004,3.326,1340}
 |          0
+             2 | 
{13.872,1.814,2.376,15.56,88.2,2.806,2.928,0.288,1.844,5.35198,1.044,3.348,988} 
 |          1
+(2 rows)
+</pre></li>
+<li>Run a similar example to the above, but using array input and the random 
centroid seeding method. Create the input table: <pre class="example">
+DROP TABLE IF EXISTS km_arrayin CASCADE;
+CREATE TABLE km_arrayin(pid int,
+                        p1 float,
+                        p2 float,
+                        p3 float,
+                        p4 float,
+                        p5 float,
+                        p6 float,
+                        p7 float,
+                        p8 float,
+                        p9 float,
+                        p10 float,
+                        p11 float,
+                        p12 float,
+                        p13 float);
+INSERT INTO km_arrayin VALUES
+(1,  14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.0600, 0.2800, 2.29, 5.64, 1.04, 
3.92, 1065),
+(2,  13.2, 1.78, 2.14, 11.2, 1, 2.65, 2.76, 0.26, 1.28, 4.38, 1.05, 3.49, 
1050),
+(3,  13.16, 2.36,  2.67, 18.6, 101, 2.8,  3.24, 0.3, 2.81, 5.6799, 1.03, 3.17, 
1185),
+(4,  14.37, 1.95, 2.5, 16.8, 113, 3.85, 3.49, 0.24, 2.18, 7.8, 0.86, 3.45, 
1480),
+(5,  13.24, 2.59, 2.87, 21, 118, 2.8, 2.69, 0.39, 1.82, 4.32, 1.04, 2.93, 735),
+(6,  14.2, 1.76, 2.45, 15.2, 112, 3.27, 3.39, 0.34, 1.97, 6.75, 1.05, 2.85, 
1450),
+(7,  14.39, 1.87, 2.45, 14.6, 96, 2.5, 2.52, 0.3, 1.98, 5.25, 1.02, 3.58, 
1290),
+(8,  14.06, 2.15, 2.61, 17.6, 121, 2.6, 2.51, 0.31, 1.25, 5.05, 1.06, 3.58, 
1295),
+(9,  14.83, 1.64, 2.17, 14, 97, 2.8, 2.98, 0.29, 1.98, 5.2, 1.08, 2.85, 1045),
+(10, 13.86, 1.35, 2.27, 16, 98, 2.98, 3.15, 0.22, 1.8500, 7.2199, 1.01, 3.55, 
1045);
+</pre> Now find the cluster assignment for each point: <pre class="example">
+DROP TABLE IF EXISTS km_result;
+-- Run kmeans algorithm
+CREATE TABLE km_result AS
+SELECT * FROM madlib.kmeans_random('km_arrayin',
+                                'ARRAY[p1, p2, p3, p4, p5, p6,
+                                      p7, p8, p9, p10, p11, p12, p13]',
+                                2,
+                                'madlib.squared_dist_norm2',
+                                'madlib.avg',
+                                20,
+                                0.001);
+-- Get point assignment
+SELECT data.*,  (madlib.closest_column(centroids,
+                                       ARRAY[p1, p2, p3, p4, p5, p6,
+                                      p7, p8, p9, p10, p11, p12, 
p13])).column_id as cluster_id
+FROM km_arrayin as data, km_result
+ORDER BY data.pid;
+</pre> This produces the result in column format: <pre class="result">
+ pid |  p1   |  p2  |  p3  |  p4  | p5  |  p6  |  p7  |  p8  |  p9  |  p10   | 
p11  | p12  | p13  | cluster_id
+-----+-------+------+------+------+-----+------+------+------+------+--------+------+------+------+------------
+   1 | 14.23 | 1.71 | 2.43 | 15.6 | 127 |  2.8 | 3.06 | 0.28 | 2.29 |   5.64 | 
1.04 | 3.92 | 1065 |          0
+   2 |  13.2 | 1.78 | 2.14 | 11.2 |   1 | 2.65 | 2.76 | 0.26 | 1.28 |   4.38 | 
1.05 | 3.49 | 1050 |          0
+   3 | 13.16 | 2.36 | 2.67 | 18.6 | 101 |  2.8 | 3.24 |  0.3 | 2.81 | 5.6799 | 
1.03 | 3.17 | 1185 |          0
+   4 | 14.37 | 1.95 |  2.5 | 16.8 | 113 | 3.85 | 3.49 | 0.24 | 2.18 |    7.8 | 
0.86 | 3.45 | 1480 |          1
+   5 | 13.24 | 2.59 | 2.87 |   21 | 118 |  2.8 | 2.69 | 0.39 | 1.82 |   4.32 | 
1.04 | 2.93 |  735 |          0
+   6 |  14.2 | 1.76 | 2.45 | 15.2 | 112 | 3.27 | 3.39 | 0.34 | 1.97 |   6.75 | 
1.05 | 2.85 | 1450 |          1
+   7 | 14.39 | 1.87 | 2.45 | 14.6 |  96 |  2.5 | 2.52 |  0.3 | 1.98 |   5.25 | 
1.02 | 3.58 | 1290 |          1
+   8 | 14.06 | 2.15 | 2.61 | 17.6 | 121 |  2.6 | 2.51 | 0.31 | 1.25 |   5.05 | 
1.06 | 3.58 | 1295 |          1
+   9 | 14.83 | 1.64 | 2.17 |   14 |  97 |  2.8 | 2.98 | 0.29 | 1.98 |    5.2 | 
1.08 | 2.85 | 1045 |          0
+  10 | 13.86 | 1.35 | 2.27 |   16 |  98 | 2.98 | 3.15 | 0.22 | 1.85 | 7.2199 | 
1.01 | 3.55 | 1045 |          0
+(10 rows)
+</pre></li>
+</ol>
+<p><a class="anchor" id="notes"></a></p><dl class="section 
user"><dt>Notes</dt><dd></dd></dl>
+<p>The algorithm stops when one of the following conditions is met:</p><ul>
+<li>The fraction of updated points is smaller than the convergence threshold 
(<em>min_frac_reassigned</em> argument). (Default: 0.001).</li>
+<li>The algorithm reaches the maximum number of allowed iterations 
(<em>max_num_iterations</em> argument). (Default: 20).</li>
+</ul>
+<p>A popular method to assess the quality of the clustering is the 
<em>silhouette coefficient</em>, a simplified version of which is provided as 
part of the k-means module. Note that for large data sets, this computation is 
expensive.</p>
+<p>The silhouette function has the following syntax: </p><pre class="syntax">
+simple_silhouette( rel_source,
+                   expr_point,
+                   centroids,
+                   fn_dist
+                 )
+</pre><p> <b>Arguments</b> </p><dl class="arglist">
+<dt>rel_source </dt>
+<dd>TEXT. The name of the relation containing the input points. </dd>
+<dt>expr_point </dt>
+<dd>TEXT. An expression evaluating to point coordinates for each row in the 
relation. </dd>
+<dt>centroids </dt>
+<dd>TEXT. An expression evaluating to an array of centroids.  </dd>
+<dt>fn_dist (optional) </dt>
+<dd>TEXT, default: 'dist_norm2'. The name of a function to calculate the 
distance of a point from a centroid. See the <em>fn_dist</em> argument of the 
k-means training function. </dd>
+</dl>
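+<p>For reference, the simplified silhouette scores each point against 
centroids rather than against all other points: with \( a_i \) the distance 
from point \( x_i \) to its closest centroid and \( b_i \) the distance to its 
second-closest centroid, the returned coefficient is the average over all 
points of </p><p class="formulaDsp">
+\[ s_i = \frac{b_i - a_i}{\max(a_i, b_i)} \]
+</p>
+<p> so values close to 1 indicate compact, well-separated clusters.</p>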
+<p><a class="anchor" id="background"></a></p><dl class="section 
user"><dt>Technical Background</dt><dd></dd></dl>
+<p>Formally, we wish to minimize the following objective function: </p><p 
class="formulaDsp">
+\[ (c_1, \dots, c_k) \mapsto \sum_{i=1}^n \min_{j=1}^k 
\operatorname{dist}(x_i, c_j) \]
+</p>
+<p> In the most common case, \( \operatorname{dist} \) is the square of the 
Euclidean distance.</p>
+<p>This problem is computationally difficult (NP-hard), yet the local-search 
heuristic proposed by Lloyd [4] performs reasonably well in practice. In fact, 
it is so ubiquitous today that it is often referred to as the <em>standard 
algorithm</em> or even just the <em>k-means algorithm</em> [1]. It works as 
follows:</p>
+<ol type="1">
+<li>Seed the \( k \) centroids (see below)</li>
+<li>Repeat until convergence:<ol type="a">
+<li>Assign each point to its closest centroid</li>
+<li>Move each centroid to a position that minimizes the sum of distances in 
this cluster (see the formula below)</li>
+</ol>
+</li>
+<li>Convergence is achieved when no points change their assignments during 
step 2a.</li>
+</ol>
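+<p>For squared Euclidean distance, the position minimizing the sum in step 2b 
is the arithmetic mean of the cluster's points, which is why 'avg' is the 
default centroid aggregate: </p><p class="formulaDsp">
+\[ c_j \leftarrow \frac{1}{|C_j|} \sum_{x_i \in C_j} x_i \]
+</p>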
+<p>Since the objective function decreases in every step, this algorithm is 
guaranteed to converge to a local optimum.</p>
+<p><a class="anchor" id="literature"></a></p><dl class="section 
user"><dt>Literature</dt><dd></dd></dl>
+<p><a class="anchor" id="kmeans-lit-1"></a>[1] Wikipedia, K-means Clustering, 
<a 
href="http://en.wikipedia.org/wiki/K-means_clustering";>http://en.wikipedia.org/wiki/K-means_clustering</a></p>
+<p><a class="anchor" id="kmeans-lit-2"></a>[2] David Arthur, Sergei 
Vassilvitskii: k-means++: the advantages of careful seeding, Proceedings of the 
18th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA'07), pp. 1027-1035, 
<a 
href="http://www.stanford.edu/~darthur/kMeansPlusPlus.pdf";>http://www.stanford.edu/~darthur/kMeansPlusPlus.pdf</a></p>
+<p><a class="anchor" id="kmeans-lit-3"></a>[3] E. R. Hruschka, L. N. C. Silva, 
R. J. G. B. Campello: Clustering Gene-Expression Data: A Hybrid Approach that 
Iterates Between k-Means and Evolutionary Search. In: Studies in Computational 
Intelligence - Hybrid Evolutionary Algorithms. pp. 313-335. Springer. 2007.</p>
+<p><a class="anchor" id="kmeans-lit-4"></a>[4] Lloyd, Stuart: Least squares 
quantization in PCM. Technical Note, Bell Laboratories. Published much later 
in: IEEE Transactions on Information Theory 28(2), pp. 128-137. 1982.</p>
+<p><a class="anchor" id="kmeans-lit-5"></a>[5] Leisch, Friedrich: A Toolbox 
for K-Centroids Cluster Analysis. In: Computational Statistics and Data 
Analysis, 51(2). pp. 526-544. 2006.</p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related 
Topics</dt><dd></dd></dl>
+<p>File <a class="el" href="kmeans_8sql__in.html" title="Set of functions for 
k-means clustering. ">kmeans.sql_in</a> documenting the k-Means SQL 
functions</p>
+<p><a class="el" href="group__grp__svec.html">Sparse Vectors</a></p>
+<p>simple_silhouette()</p>

http://git-wip-us.apache.org/repos/asf/madlib-site/blob/af0e5f14/docs/v1.15.1/group__grp__knn.html
----------------------------------------------------------------------
diff --git a/docs/v1.15.1/group__grp__knn.html 
b/docs/v1.15.1/group__grp__knn.html
new file mode 100644
index 0000000..97cf49f
--- /dev/null
+++ b/docs/v1.15.1/group__grp__knn.html
@@ -0,0 +1,411 @@
+<div class="header">
+  <div class="headertitle">
+<div class="title">k-Nearest Neighbors<div class="ingroups"><a class="el" 
href="group__grp__early__stage.html">Early Stage Development</a></div></div>  
</div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> <ul>
+<li class="level1">
+<a href="#knn">K-Nearest Neighbors</a> </li>
+<li class="level1">
+<a href="#usage">Usage</a> </li>
+<li class="level1">
+<a href="#output">Output Format</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#background">Technical Background</a> </li>
+<li class="level1">
+<a href="#literature">Literature</a> </li>
+</ul>
+</div><dl class="section warning"><dt>Warning</dt><dd><em> This MADlib method 
is still in early stage development. There may be some issues that will be 
addressed in a future version. Interface and implementation are subject to 
change. </em></dd></dl>
+<p><a class="anchor" id="knn"></a> K-nearest neighbors is a method for finding 
the k closest points to a given data point in terms of a given metric. Its 
input consists of data points as features from testing examples, and it looks 
for k closest points in the training set for each of the data points in test 
set. The output of KNN depends on the type of task. For classification, the 
output is the majority vote of the classes of the k nearest data points. That 
is, the testing example gets assigned the most popular class from the nearest 
neighbors. For regression, the output is the average of the values of k nearest 
neighbors of the given test point.</p>
+<p><a class="anchor" id="usage"></a></p><dl class="section 
user"><dt>Usage</dt><dd><pre class="syntax">
+knn( point_source,
+     point_column_name,
+     point_id,
+     label_column_name,
+     test_source,
+     test_column_name,
+     test_id,
+     output_table,
+     k,
+     output_neighbors,
+     fn_dist,
+     weighted_avg
+   )
+</pre></dd></dl>
+<p><b>Arguments</b> </p><dl class="arglist">
+<dt>point_source </dt>
+<dd><p class="startdd">TEXT. Name of the table containing the training data 
points. Training data points are expected to be stored row-wise in a column of 
type <code>DOUBLE PRECISION[]</code>. </p>
+<p class="enddd"></p>
+</dd>
+<dt>point_column_name </dt>
+<dd><p class="startdd">TEXT. Name of the column with training data points or 
expression that evaluates to a numeric array</p>
+<p class="enddd"></p>
+</dd>
+<dt>point_id </dt>
+<dd><p class="startdd">TEXT. Name of the column in 'point_source’ containing 
source data ids. The ids are of type INTEGER with no duplicates. They do not 
need to be contiguous. This parameter must be used if the list of nearest 
neighbors are to be output, i.e., if the parameter 'output_neighbors' below is 
TRUE or if 'label_column_name' is NULL.</p>
+<p class="enddd"></p>
+</dd>
+<dt>label_column_name </dt>
+<dd><p class="startdd">TEXT. Name of the column with labels/values of training 
data points. If this column is a Boolean, integer or text, it will run KNN 
classification, else if it is double precision values will run KNN regression. 
If you set this to NULL, it will only return the set of neighbors without 
actually doing classification or regression.</p>
+<p class="enddd"></p>
+</dd>
+<dt>test_source </dt>
+<dd><p class="startdd">TEXT. Name of the table containing the test data 
points. Testing data points are expected to be stored row-wise in a column of 
type <code>DOUBLE PRECISION[]</code>. </p>
+<p class="enddd"></p>
+</dd>
+<dt>test_column_name </dt>
+<dd><p class="startdd">TEXT. Name of the column with testing data points or 
expression that evaluates to a numeric array</p>
+<p class="enddd"></p>
+</dd>
+<dt>test_id </dt>
+<dd><p class="startdd">TEXT. Name of the column having ids of data points in 
test data table.</p>
+<p class="enddd"></p>
+</dd>
+<dt>output_table </dt>
+<dd><p class="startdd">TEXT. Name of the table to store final results.</p>
+<p class="enddd"></p>
+</dd>
+<dt>k (optional) </dt>
+<dd><p class="startdd">INTEGER. default: 1. Number of nearest neighbors to 
consider. For classification, should be an odd number to break ties, otherwise 
the result may depend on ordering of the input data.</p>
+<p class="enddd"></p>
+</dd>
+<dt>output_neighbors (optional)  </dt>
+<dd><p class="startdd">BOOLEAN default: TRUE. Outputs the list of k-nearest 
neighbors that were used in the voting/averaging, sorted from closest to 
furthest.</p>
+<p class="enddd"></p>
+</dd>
+<dt>fn_dist (optional) </dt>
+<dd><p class="startdd">TEXT, default: 'squared_dist_norm2'. The name of the 
function used to calculate the distance between data points.</p>
+<p>The following distance functions can be used: </p><ul>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#aad193850e79c4b9d811ca9bc53e13476">dist_norm1</a></b>:
 1-norm/Manhattan </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#aa58e51526edea6ea98db30b6f250adb4">dist_norm2</a></b>:
 2-norm/Euclidean </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#a00a08e69f27524f2096032214e15b668">squared_dist_norm2</a></b>:
 squared Euclidean distance </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#a8c7b9281a72ff22caf06161701b27e84">dist_angle</a></b>:
 angle </li>
+<li>
+<b><a class="el" 
href="linalg_8sql__in.html#afa13b4c6122b99422d666dedea136c18">dist_tanimoto</a></b>:
 tanimoto </li>
+<li>
+<b>user defined function</b> with signature <code>DOUBLE PRECISION[] x, DOUBLE 
PRECISION[] y -&gt; DOUBLE PRECISION</code></li>
+</ul>
+<p class="enddd"></p>
+</dd>
+<dt>weighted_avg (optional) </dt>
+<dd><p class="startdd">BOOLEAN, default: FALSE. Calculates classification or 
regression values using a weighted average. The idea is to weigh the 
contribution of each of the k neighbors according to their distance to the test 
point, giving greater influence to closer neighbors. The distance function 
'fn_dist' specified above is used.</p>
+<p>For classification, majority voting weighs a neighbor according to inverse 
distance.</p>
+<p class="enddd">For regression, the inverse distance weighting approach is 
used from Shepard [4]. </p>
+</dd>
+</dl>
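+<p>For reference, with \( d_i = \operatorname{dist}(x, x_i) \) the distance 
from the test point \( x \) to its \( i \)-th nearest neighbor, 
inverse-distance weighting in its simplest form predicts </p><p class="formulaDsp">
+\[ \hat{y} = \frac{\sum_{i=1}^k y_i / d_i}{\sum_{i=1}^k 1 / d_i} \]
+</p>
+<p> for regression; for classification, each neighbor's vote for its class is 
weighted by \( 1/d_i \) before taking the majority.</p>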
+<p><a class="anchor" id="output"></a></p><dl class="section user"><dt>Output 
Format</dt><dd></dd></dl>
+<p>The output of the KNN module is a table with the following columns: 
</p><table class="output">
+<tr>
+<th>id </th><td>INTEGER. The ids of test data points.  </td></tr>
+<tr>
+<th>test_column_name </th><td>DOUBLE PRECISION[]. The test data points.  
</td></tr>
+<tr>
+<th>prediction </th><td>INTEGER or DOUBLE PRECISION. The predicted label in 
the case of classification, or the average value in the case of regression.  
</td></tr>
+<tr>
+<th>k_nearest_neighbours </th><td>INTEGER[]. List of nearest neighbors, sorted 
closest to furthest from the corresponding test point.  </td></tr>
+</table>
+<p><a class="anchor" id="examples"></a></p><dl class="section 
user"><dt>Examples</dt><dd></dd></dl>
+<ol type="1">
+<li>Prepare some training data for classification: <pre class="example">
+DROP TABLE IF EXISTS knn_train_data;
+CREATE TABLE knn_train_data (
+                    id integer,
+                    data integer[],
+                    label integer  -- Integer label, used for classification
+                    );
+INSERT INTO knn_train_data VALUES
+(1, '{1,1}', 1),
+(2, '{2,2}', 1),
+(3, '{3,3}', 1),
+(4, '{4,4}', 1),
+(5, '{4,5}', 1),
+(6, '{20,50}', 0),
+(7, '{10,31}', 0),
+(8, '{81,13}', 0),
+(9, '{1,111}', 0);
+</pre></li>
+<li>Prepare some training data for regression: <pre class="example">
+DROP TABLE IF EXISTS knn_train_data_reg;
+CREATE TABLE knn_train_data_reg (
+                    id integer,
+                    data integer[],
+                    label float  -- Float label, used for regression
+                    );
+INSERT INTO knn_train_data_reg VALUES
+(1, '{1,1}', 1.0),
+(2, '{2,2}', 1.0),
+(3, '{3,3}', 1.0),
+(4, '{4,4}', 1.0),
+(5, '{4,5}', 1.0),
+(6, '{20,50}', 0.0),
+(7, '{10,31}', 0.0),
+(8, '{81,13}', 0.0),
+(9, '{1,111}', 0.0);
+</pre></li>
+<li>Prepare some testing data: <pre class="example">
+DROP TABLE IF EXISTS knn_test_data;
+CREATE TABLE knn_test_data (
+                    id integer,
+                    data integer[]
+                    );
+INSERT INTO knn_test_data VALUES
+(1, '{2,1}'),
+(2, '{2,6}'),
+(3, '{15,40}'),
+(4, '{12,1}'),
+(5, '{2,90}'),
+(6, '{50,45}');
+</pre></li>
+<li>Run KNN for classification: <pre class="example">
+DROP TABLE IF EXISTS knn_result_classification;
+SELECT * FROM madlib.knn(
+                'knn_train_data',      -- Table of training data
+                'data',                -- Col name of training data
+                'id',                  -- Col name of id in train data
+                'label',               -- Training labels
+                'knn_test_data',       -- Table of test data
+                'data',                -- Col name of test data
+                'id',                  -- Col name of id in test data
+                'knn_result_classification',  -- Output table
+                 3,                    -- Number of nearest neighbors
+                 True,                 -- True to list nearest-neighbors by id
+                 'madlib.squared_dist_norm2' -- Distance function
+                );
+SELECT * from knn_result_classification ORDER BY id;
+</pre> Result: <pre class="result">
+ id |  data   | prediction | k_nearest_neighbours
+----+---------+------------+----------------------
+  1 | {2,1}   |          1 | {2,1,3}
+  2 | {2,6}   |          1 | {5,4,3}
+  3 | {15,40} |          0 | {7,6,5}
+  4 | {12,1}  |          1 | {4,5,3}
+  5 | {2,90}  |          0 | {9,6,7}
+  6 | {50,45} |          0 | {6,7,8}
+(6 rows)
+</pre> Note that the nearest neighbors are sorted from closest to furthest 
from the corresponding test point.</li>
+<li>Run KNN for regression: <pre class="example">
+DROP TABLE IF EXISTS knn_result_regression;
+SELECT * FROM madlib.knn(
+                'knn_train_data_reg',  -- Table of training data
+                'data',                -- Col name of training data
+                'id',                  -- Col Name of id in train data
+                'label',               -- Training labels
+                'knn_test_data',       -- Table of test data
+                'data',                -- Col name of test data
+                'id',                  -- Col name of id in test data
+                'knn_result_regression',  -- Output table
+                 3,                    -- Number of nearest neighbors
+                True,                  -- True to list nearest-neighbors by id
+                'madlib.dist_norm2'    -- Distance function
+                );
+SELECT * FROM knn_result_regression ORDER BY id;
+</pre> Result: <pre class="result">
+ id |  data   |    prediction     | k_nearest_neighbours
+----+---------+-------------------+----------------------
+  1 | {2,1}   |                 1 | {2,1,3}
+  2 | {2,6}   |                 1 | {5,4,3}
+  3 | {15,40} | 0.333333333333333 | {7,6,5}
+  4 | {12,1}  |                 1 | {4,5,3}
+  5 | {2,90}  |                 0 | {9,6,7}
+  6 | {50,45} |                 0 | {6,7,8}
+(6 rows)
+</pre></li>
+<li>List nearest neighbors only, without doing classification or regression: 
<pre class="example">
+DROP TABLE IF EXISTS knn_result_list_neighbors;
+SELECT * FROM madlib.knn(
+                'knn_train_data_reg',  -- Table of training data
+                'data',                -- Col name of training data
+                'id',                  -- Col Name of id in train data
+                NULL,                  -- NULL training labels means just list 
neighbors
+                'knn_test_data',       -- Table of test data
+                'data',                -- Col name of test data
+                'id',                  -- Col name of id in test data
+                'knn_result_list_neighbors', -- Output table
+                3                      -- Number of nearest neighbors
+                );
+SELECT * FROM knn_result_list_neighbors ORDER BY id;
+</pre> Result, with neighbors sorted from closest to furthest: <pre 
class="result">
+ id |  data   | k_nearest_neighbours
+----+---------+----------------------
+  1 | {2,1}   | {2,1,3}
+  2 | {2,6}   | {5,4,3}
+  3 | {15,40} | {7,6,5}
+  4 | {12,1}  | {4,5,3}
+  5 | {2,90}  | {9,6,7}
+  6 | {50,45} | {6,7,8}
+(6 rows)
+</pre></li>
+<li>Run KNN for classification using the weighted average: <pre 
class="example">
+DROP TABLE IF EXISTS knn_result_classification;
+SELECT * FROM madlib.knn(
+                'knn_train_data',      -- Table of training data
+                'data',                -- Col name of training data
+                'id',                  -- Col name of id in train data
+                'label',               -- Training labels
+                'knn_test_data',       -- Table of test data
+                'data',                -- Col name of test data
+                'id',                  -- Col name of id in test data
+                'knn_result_classification',  -- Output table
+                 3,                    -- Number of nearest neighbors
+                 True,                 -- True to list nearest-neighbors by id
+                 'madlib.squared_dist_norm2', -- Distance function
+                 True                 -- For weighted average
+                );
+SELECT * FROM knn_result_classification ORDER BY id;
+</pre> <pre class="result">
+ id |  data   |     prediction      | k_nearest_neighbours
+----+---------+---------------------+----------------------
+  1 | {2,1}   |                 1   | {2,1,3}
+  2 | {2,6}   |                 1   | {5,4,3}
+  3 | {15,40} |                 0   | {7,6,5}
+  4 | {12,1}  |                 1   | {4,5,3}
+  5 | {2,90}  |                 0   | {9,6,7}
+  6 | {50,45} |                 0   | {6,7,8}
+(6 rows)
+</pre></li>
+</ol>
+<p><a class="anchor" id="background"></a></p><dl class="section 
user"><dt>Technical Background</dt><dd></dd></dl>
+<p>The training data points are vectors in a multidimensional feature space, 
each with a class label. The training phase of the algorithm consists only of 
storing the feature vectors and class labels of the training points.</p>
+<p>In the classification phase, k is a user-defined constant, and an unlabeled 
vector (a test point) is classified by assigning it the label that is most 
frequent among the k training samples nearest to that test point. In the case 
of regression, the average of the values of these k training samples is 
assigned to the test point.</p>
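+<p>For reference, with \( N_k(x) \) the set of the k training points nearest 
to a test point \( x \), the unweighted classification rule can be written 
as </p><p class="formulaDsp">
+\[ \hat{y} = \underset{c}{\operatorname{arg\,max}} \sum_{i \in N_k(x)} 
\mathbf{1}(y_i = c) \]
+</p>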
+<p><a class="anchor" id="literature"></a></p><dl class="section 
user"><dt>Literature</dt><dd></dd></dl>
+<p><a class="anchor" id="knn-lit-1"></a>[1] Wikipedia, k-nearest neighbors 
algorithm, <a 
href="https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm";>https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm</a></p>
+<p><a class="anchor" id="knn-lit-2"></a>[2] N. S. Altman: An Introduction to 
Kernel and Nearest-Neighbor Nonparametric Regression <a 
href="http://www.stat.washington.edu/courses/stat527/s13/readings/Altman_AmStat_1992.pdf";>http://www.stat.washington.edu/courses/stat527/s13/readings/Altman_AmStat_1992.pdf</a></p>
+<p><a class="anchor" id="knn-lit-3"></a>[3] Gongde Guo1, Hui Wang, David Bell, 
Yaxin Bi, Kieran Greer: KNN Model-Based Approach in Classification, <a 
href="https://ai2-s2-pdfs.s3.amazonaws.com/a7e2/814ec5db800d2f8c4313fd436e9cf8273821.pdf";>https://ai2-s2-pdfs.s3.amazonaws.com/a7e2/814ec5db800d2f8c4313fd436e9cf8273821.pdf</a></p>
+<p><a class="anchor" id="knn-lit-4"></a>[4] Shepard, Donald (1968). "A 
two-dimensional interpolation function for
+irregularly-spaced data". Proceedings of the 1968 ACM National Conference. pp. 
517–524.</p>
