http://git-wip-us.apache.org/repos/asf/madlib-site/blob/af0e5f14/docs/v1.15.1/group__grp__linreg.html ---------------------------------------------------------------------- diff --git a/docs/v1.15.1/group__grp__linreg.html b/docs/v1.15.1/group__grp__linreg.html new file mode 100644 index 0000000..280d5d0 --- /dev/null +++ b/docs/v1.15.1/group__grp__linreg.html @@ -0,0 +1,474 @@ +<!-- HTML header for doxygen 1.8.4--> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> +<meta http-equiv="X-UA-Compatible" content="IE=9"/> +<meta name="generator" content="Doxygen 1.8.14"/> +<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/> +<title>MADlib: Linear Regression</title> +<link href="tabs.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="jquery.js"></script> +<script type="text/javascript" src="dynsections.js"></script> +<link href="navtree.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="resize.js"></script> +<script type="text/javascript" src="navtreedata.js"></script> +<script type="text/javascript" src="navtree.js"></script> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ + $(document).ready(initResizable); +/* @license-end */</script> +<link href="search/search.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="search/searchdata.js"></script> +<script type="text/javascript" src="search/search.js"></script> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ + $(document).ready(function() { init_search(); }); +/* @license-end */ +</script> +<script type="text/x-mathjax-config"> + MathJax.Hub.Config({ + extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"], + jax: ["input/TeX","output/HTML-CSS"], +}); +</script><script type="text/javascript" async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/MathJax.js"></script> +<!-- hack in the navigation tree --> +<script type="text/javascript" src="eigen_navtree_hacks.js"></script> +<link href="doxygen.css" rel="stylesheet" type="text/css" /> +<link href="madlib_extra.css" rel="stylesheet" type="text/css"/> +<!-- google analytics --> +<script> + (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ + (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), + m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) + })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); + ga('create', 'UA-45382226-1', 'madlib.apache.org'); + ga('send', 'pageview'); +</script> +</head> +<body> +<div id="top"><!-- do not remove this div, it is closed by doxygen! 
--> +<div id="titlearea"> +<table cellspacing="0" cellpadding="0"> + <tbody> + <tr style="height: 56px;"> + <td id="projectlogo"><a href="http://madlib.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td> + <td style="padding-left: 0.5em;"> + <div id="projectname"> + <span id="projectnumber">1.15.1</span> + </div> + <div id="projectbrief">User Documentation for Apache MADlib</div> + </td> + <td> <div id="MSearchBox" class="MSearchBoxInactive"> + <span class="left"> + <img id="MSearchSelect" src="search/mag_sel.png" + onmouseover="return searchBox.OnSearchSelectShow()" + onmouseout="return searchBox.OnSearchSelectHide()" + alt=""/> + <input type="text" id="MSearchField" value="Search" accesskey="S" + onfocus="searchBox.OnSearchFieldFocus(true)" + onblur="searchBox.OnSearchFieldFocus(false)" + onkeyup="searchBox.OnSearchFieldChange(event)"/> + </span><span class="right"> + <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> + </span> + </div> +</td> + </tr> + </tbody> +</table> +</div> +<!-- end header part --> +<!-- Generated by Doxygen 1.8.14 --> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ +var searchBox = new SearchBox("searchBox", "search",false,'Search'); +/* @license-end */ +</script> +</div><!-- top --> +<div id="side-nav" class="ui-resizable side-nav-resizable"> + <div id="nav-tree"> + <div id="nav-tree-contents"> + <div id="nav-sync" class="sync"></div> + </div> + </div> + <div id="splitbar" style="-moz-user-select:none;" + class="ui-resizable-handle"> + </div> +</div> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ +$(document).ready(function(){initNavTree('group__grp__linreg.html','');}); +/* @license-end */ +</script> +<div id="doc-content"> +<!-- window showing the filter options --> +<div id="MSearchSelectWindow" + onmouseover="return searchBox.OnSearchSelectShow()" + onmouseout="return searchBox.OnSearchSelectHide()" + onkeydown="return searchBox.OnSearchSelectKey(event)"> +</div> + +<!-- iframe showing the search results (closed by default) --> +<div id="MSearchResultsWindow"> +<iframe src="javascript:void(0)" frameborder="0" + name="MSearchResults" id="MSearchResults"> +</iframe> +</div> + +<div class="header"> + <div class="headertitle"> +<div class="title">Linear Regression<div class="ingroups"><a class="el" href="group__grp__super.html">Supervised Learning</a> » <a class="el" href="group__grp__regml.html">Regression Models</a></div></div> </div> +</div><!--header--> +<div class="contents"> +<div class="toc"><b>Contents</b> <ul> +<li class="level1"> +<a href="#train">Training Function</a> </li> +<li class="level1"> +<a href="#predict">Prediction Function</a> </li> +<li class="level1"> +<a href="#examples">Examples</a> </li> +<li class="level1"> +<a href="#background">Technical Background</a> </li> +<li class="level1"> +<a href="#literature">Literature</a> </li> +<li class="level1"> +<a href="#related">Related Topics</a> </li> +</ul> +</div><p>Linear regression models a linear relationship of a scalar dependent variable \( y \) to one or more explanatory independent variables \( x \) and builds a model of coefficients.</p> +<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd></dd></dl> +<p>The linear regression training 
function has the following syntax. </p><pre class="syntax"> +linregr_train( source_table, + out_table, + dependent_varname, + independent_varname, + grouping_cols, + heteroskedasticity_option + ) +</pre><p><b>Arguments</b> </p><dl class="arglist"> +<dt>source_table </dt> +<dd><p class="startdd">TEXT. Name of the table containing the training data.</p> +<p class="enddd"></p> +</dd> +<dt>out_table </dt> +<dd><p class="startdd">TEXT. Name of the generated table containing the output model.</p> +<p>The output table contains the following columns: </p><table class="output"> +<tr> +<th><...> </th><td>Any grouping columns provided during training. Present only if the grouping option is used. </td></tr> +<tr> +<th>coef </th><td>FLOAT8[]. Vector of the coefficients of the regression. </td></tr> +<tr> +<th>r2 </th><td>FLOAT8. R-squared coefficient of determination of the model. </td></tr> +<tr> +<th>std_err </th><td>FLOAT8[]. Vector of the standard errors of the coefficients. </td></tr> +<tr> +<th>t_stats </th><td>FLOAT8[]. Vector of the t-statistics of the coefficients. </td></tr> +<tr> +<th>p_values </th><td>FLOAT8[]. Vector of the p-values of the coefficients. </td></tr> +<tr> +<th>condition_no </th><td>FLOAT8. The condition number of the \(X^{*}X\) matrix. A high condition number is usually an indication that there may be some numeric instability in the result, yielding a less reliable model. A high condition number often results when there is a significant amount of collinearity in the underlying design matrix, in which case other regression techniques, such as elastic net regression, may be more appropriate. </td></tr> +<tr> +<th>bp_stats </th><td>FLOAT8. The Breusch-Pagan statistic of heteroskedasticity. Present only if the heteroskedasticity_option argument was set to TRUE when the model was trained. </td></tr> +<tr> +<th>bp_p_value </th><td>FLOAT8. The Breusch-Pagan calculated p-value. Present only if the heteroskedasticity_option parameter was set to TRUE when the model was trained. </td></tr> +<tr> +<th>num_rows_processed </th><td>INTEGER. The number of rows actually used in each group. </td></tr> +<tr> +<th>num_missing_rows_skipped </th><td>INTEGER. The number of rows that have NULL values in the dependent or independent variables and were skipped in the computation for each group. </td></tr> +<tr> +<th>variance_covariance </th><td>FLOAT8[]. Variance/covariance matrix. </td></tr> +</table> +<p class="enddd">A summary table named <out_table>_summary is created together with the output table. It has the following columns: </p><table class="output"> +<tr> +<th>method </th><td>'linregr' for linear regression. </td></tr> +<tr> +<th>source_table </th><td>The data source table name. </td></tr> +<tr> +<th>out_table </th><td>The output table name. </td></tr> +<tr> +<th>dependent_varname </th><td>The dependent variable. </td></tr> +<tr> +<th>independent_varname </th><td>The independent variables. </td></tr> +<tr> +<th>num_rows_processed </th><td>The total number of rows that were used in the computation. </td></tr> +<tr> +<th>num_missing_rows_skipped </th><td>The total number of rows that were skipped because of NULL values in them. </td></tr> +<tr> +<th>grouping_cols </th><td>Names of the grouping columns. </td></tr> +</table>
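 +<p>The bp_stats and bp_p_value columns are produced only when heteroskedasticity_option is TRUE. As a minimal hedged sketch of that mode (reusing the houses table created in the Examples section below; the output table name here is illustrative): </p><pre class="example"> +-- Train with the Breusch-Pagan test enabled (last argument TRUE) +DROP TABLE IF EXISTS houses_linregr_hsk, houses_linregr_hsk_summary; +SELECT madlib.linregr_train( 'houses', + 'houses_linregr_hsk', + 'price', + 'ARRAY[1, tax, bath, size]', + NULL, -- no grouping + TRUE -- heteroskedasticity_option + ); +-- Inspect the heteroskedasticity statistics +SELECT bp_stats, bp_p_value FROM houses_linregr_hsk; +</pre>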
 +</dd> +<dt></dt> +<dd><dl class="section note"><dt>Note</dt><dd>For p-values, the computed value is returned directly. Other statistical packages, such as R, compute the same values but apply a formatting function when printing: any p-value smaller than the machine epsilon (the smallest positive floating-point number 'x' such that '1 + x != 1') is displayed as "< xxx", where xxx is the value of the machine epsilon. Although the printed results may look different, the underlying values are the same. </dd></dl> +</dd> +<dt>dependent_varname </dt> +<dd><p class="startdd">TEXT. Expression to evaluate for the dependent variable.</p> +<p class="enddd"></p> +</dd> +<dt>independent_varname </dt> +<dd><p class="startdd">TEXT. Expression list to evaluate for the independent variables. An intercept variable is not assumed. It is common to provide an explicit intercept term by including a single constant <code>1</code> term in the independent variable list.</p> +<p class="enddd"></p> +</dd> +<dt>grouping_cols (optional) </dt> +<dd><p class="startdd">TEXT, default: NULL. An expression list used to group the input dataset into discrete groups, running one regression per group. Similar to the SQL <code>GROUP BY</code> clause. When this value is NULL, no grouping is used and a single result model is generated for the whole data set.</p> +<p class="enddd"></p> +</dd> +<dt>heteroskedasticity_option (optional) </dt> +<dd>BOOLEAN, default: FALSE. When TRUE, the heteroskedasticity of the model is also calculated and returned with the results. </dd> +</dl> +<p><a class="anchor" id="warning"></a></p><dl class="section warning"><dt>Warning</dt><dd>The aggregate 'linregr' has been deprecated in favor of the function 'linregr_train'. If the aggregate 'linregr' is used to output the results of linear regression to a table, it is recommended to follow the general pattern shown below (replace text within '<...>' with the appropriate variable names). <pre class="syntax"> +CREATE TABLE <output table> AS +SELECT (r).* +FROM ( + SELECT linregr(<dependent variable>, <independent variable>) as r + FROM <source table> + ) q; +</pre></dd></dl> +<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Function</dt><dd>The prediction function is as follows: <pre class="syntax"> +linregr_predict(coef, col_ind) +</pre> <b>Arguments</b> <dl class="arglist"> +<dt>coef </dt> +<dd>FLOAT8[]. Vector of the coefficients of regression from training. </dd> +<dt>col_ind </dt> +<dd><p class="startdd">FLOAT8[]. An array of the independent variable values, constructed in the same way as the independent_varname expression list used during training. </p> +<p class="enddd"><a class="anchor" id="examples"></a></p> +</dd> +</dl> +</dd></dl>
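 +<p>As a minimal usage sketch (assuming the houses_linregr model trained in the Examples section below; the feature values are illustrative and must appear in the same order as in training, including the leading constant 1 for the intercept): </p><pre class="example"> +-- Predict a price for tax=870, bath=2, size=1300 +SELECT madlib.linregr_predict( coef, + ARRAY[1, 870, 2, 1300] + ) AS predicted_price +FROM houses_linregr; +</pre> +<dl class="section user"><dt>Examples</dt><dd><ol type="1"> +<li>Create an input data set. 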
<pre class="example"> +DROP TABLE IF EXISTS houses; +CREATE TABLE houses (id INT, tax INT, bedroom INT, bath FLOAT, price INT, + size INT, lot INT); +INSERT INTO houses VALUES + (1 , 590 , 2 , 1 , 50000 , 770 , 22100), + (2 , 1050 , 3 , 2 , 85000 , 1410 , 12000), + (3 , 20 , 3 , 1 , 22500 , 1060 , 3500), + (4 , 870 , 2 , 2 , 90000 , 1300 , 17500), + (5 , 1320 , 3 , 2 , 133000 , 1500 , 30000), + (6 , 1350 , 2 , 1 , 90500 , 820 , 25700), + (7 , 2790 , 3 , 2.5 , 260000 , 2130 , 25000), + (8 , 680 , 2 , 1 , 142500 , 1170 , 22000), + (9 , 1840 , 3 , 2 , 160000 , 1500 , 19000), + (10 , 3680 , 4 , 2 , 240000 , 2790 , 20000), + (11 , 1660 , 3 , 1 , 87000 , 1030 , 17500), + (12 , 1620 , 3 , 2 , 118600 , 1250 , 20000), + (13 , 3100 , 3 , 2 , 140000 , 1760 , 38000), + (14 , 2070 , 2 , 3 , 148000 , 1550 , 14000), + (15 , 650 , 3 , 1.5 , 65000 , 1450 , 12000); +</pre></li> +<li>Train a regression model. First, we generate a single regression for all data. <pre class="example"> +DROP TABLE IF EXISTS houses_linregr, houses_linregr_summary; +SELECT madlib.linregr_train( 'houses', + 'houses_linregr', + 'price', + 'ARRAY[1, tax, bath, size]' + ); +</pre> (Note that in this example we are dynamically creating the array of independent variables from column names. If you have large numbers of independent variables beyond the PostgreSQL limit of maximum columns per table, you would pre-build the arrays and store them in a single column.)</li> +<li>Next we generate three output models, one for each value of "bedroom". <pre class="example"> +DROP TABLE IF EXISTS houses_linregr_bedroom, houses_linregr_bedroom_summary; +SELECT madlib.linregr_train( 'houses', + 'houses_linregr_bedroom', + 'price', + 'ARRAY[1, tax, bath, size]', + 'bedroom' + ); +</pre></li> +<li>Examine the resulting models. <pre class="example"> +-- Set extended display on for easier reading of output +\x ON +SELECT * FROM houses_linregr; +</pre> Result: <pre class="result"> +-[ RECORD 1 ]+--------------------------------------------------------------------------- +coef | {-12849.4168959872,28.9613922651765,10181.6290712648,50.516894915354} +r2 | 0.768577580597443 +std_err | {33453.0344331391,15.8992104963997,19437.7710925923,32.928023174087} +t_stats | {-0.38410317968819,1.82156166004184,0.523806408809133,1.53416118083605} +p_values | {0.708223134615422,0.0958005827189772,0.610804093526536,0.153235085548186} +condition_no | 9002.50457085737 +num_rows_processed | 15 +num_missing_rows_skipped | 0 +variance_covariance | {{1119105512.78479,217782.067878023,-283344228.394562,-616679.69319088}, ... +</pre> Alternatively you can unnest the results for easier reading of output. <pre class="example"> +\x OFF +SELECT unnest(ARRAY['intercept','tax','bath','size']) as attribute, + unnest(coef) as coefficient, + unnest(std_err) as standard_error, + unnest(t_stats) as t_stat, + unnest(p_values) as pvalue +FROM houses_linregr; +</pre> Result: <pre class="result"> + attribute | coefficient | standard_error | t_stat | pvalue +-----------+-------------------+------------------+-------------------+-------------------- + intercept | -12849.4168959872 | 33453.0344331391 | -0.38410317968819 | 0.708223134615422 + tax | 28.9613922651765 | 15.8992104963997 | 1.82156166004184 | 0.0958005827189772 + bath | 10181.6290712648 | 19437.7710925923 | 0.523806408809133 | 0.610804093526536 + size | 50.516894915354 | 32.928023174087 | 1.53416118083605 | 0.153235085548186 +(4 rows) +</pre></li> +<li>View the results grouped by bedroom. 
<pre class="example"> +\x ON +SELECT * FROM houses_linregr_bedroom ORDER BY bedroom; +</pre> Result: <pre class="result"> +-[ RECORD 1 ]------------+---------------------------------------------------------------- +bedroom | 4 +coef | {0.0112536020318378,41.4132554771633,0.0225072040636757,31.3975496688276} +r2 | 1 +std_err | {0,0,0,0} +t_stats | {Infinity,Infinity,Infinity,Infinity} +p_values | +condition_no | Infinity +num_rows_processed | 1 +num_missing_rows_skipped | 0 +variance_covariance | {{0,0,0,0},{0,0,0,0},{0,0,0,0},{0,0,0,0}} +-[ RECORD 2 ]------------+---------------------------------------------------------------- +bedroom | 3 +coef | {-88155.8292501601,27.1966436294429,41404.0293363612,62.637521075324} +r2 | 0.841699901311252 +std_err | {57867.9999702625,17.8272309154689,43643.1321511114,70.8506824863954} +t_stats | {-1.52339512849005,1.52556747362508,0.948695185143966,0.884077878676067} +p_values | {0.188161432894871,0.187636685729869,0.386340032374927,0.417132778705789} +condition_no | 11722.6225642147 +num_rows_processed | 9 +num_missing_rows_skipped | 0 +variance_covariance | {{3348705420.5583,433697.545104226,-70253017.45773,-2593488.13800193}, ... +-[ RECORD 3 ]------------+---------------------------------------------------------------- +bedroom | 2 +coef | {-84242.0345406597,55.4430144648696,-78966.9753675319,225.611910021192} +r2 | 0.968809546465313 +std_err | {35018.9991665742,19.5731125320686,23036.8071292552,49.0448678148784} +t_stats | {-2.40560942761235,2.83261103077151,-3.42786111480046,4.60011251070697} +p_values | {0.250804617665239,0.21605133377602,0.180704400437373,0.136272031474122} +condition_no | 10086.1048721726 +num_rows_processed | 5 +num_missing_rows_skipped | 0 +variance_covariance | {{1226330302.62852,-300921.595596804,551696673.397849,-1544160.63236119}, ... +</pre></li> +<li>Compare predicted price with actual. (This example uses the original data table to perform the prediction. Typically a different test dataset with the same features as the original training dataset would be used for prediction.) 
<pre class="example"> +\x OFF +SELECT houses.*, + madlib.linregr_predict( m.coef, + ARRAY[1,tax,bath,size] + ) as predict, + price - + madlib.linregr_predict( m.coef, + ARRAY[1,tax,bath,size] + ) as residual +FROM houses, houses_linregr m ORDER BY id; +</pre> Result: <pre class="result"> + id | tax | bedroom | bath | price | size | lot | predict | residual +----+------+---------+------+--------+------+-------+------------------+------------------- + 1 | 590 | 2 | 1 | 50000 | 770 | 22100 | 53317.4426965542 | -3317.44269655424 + 2 | 1050 | 3 | 2 | 85000 | 1410 | 12000 | 109152.124955627 | -24152.1249556268 + 3 | 20 | 3 | 1 | 22500 | 1060 | 3500 | 51459.3486308563 | -28959.3486308563 + 4 | 870 | 2 | 2 | 90000 | 1300 | 17500 | 98382.215907206 | -8382.21590720605 + 5 | 1320 | 3 | 2 | 133000 | 1500 | 30000 | 121518.221409606 | 11481.7785903937 + 6 | 1350 | 2 | 1 | 90500 | 820 | 25700 | 77853.9455638561 | 12646.0544361439 + 7 | 2790 | 3 | 2.5 | 260000 | 2130 | 25000 | 201007.926371721 | 58992.0736282788 + 8 | 680 | 2 | 1 | 142500 | 1170 | 22000 | 76130.7259665617 | 66369.2740334383 + 9 | 1840 | 3 | 2 | 160000 | 1500 | 19000 | 136578.145387498 | 23421.8546125019 + 10 | 3680 | 4 | 2 | 240000 | 2790 | 20000 | 255033.90159623 | -15033.9015962295 + 11 | 1660 | 3 | 1 | 87000 | 1030 | 17500 | 97440.5250982852 | -10440.5250982852 + 12 | 1620 | 3 | 2 | 118600 | 1250 | 20000 | 117577.415360321 | 1022.58463967926 + 13 | 3100 | 3 | 2 | 140000 | 1760 | 38000 | 186203.892319613 | -46203.8923196126 + 14 | 2070 | 2 | 3 | 148000 | 1550 | 14000 | 155946.739425521 | -7946.73942552117 + 15 | 650 | 3 | 1.5 | 65000 | 1450 | 12000 | 94497.4293105379 | -29497.4293105379 +(15 rows) +</pre></li> +<li>Compare predicted price with actual price, using the grouped models trained above; a different model is applied depending on the number of bedrooms. 
<pre class="example"> +\x OFF +SELECT houses.*, + madlib.linregr_predict( m.coef, + ARRAY[1,tax,bath,size] + ) as predict, + price - + madlib.linregr_predict( m.coef, + ARRAY[1,tax,bath,size] + ) as residual +FROM houses, houses_linregr_bedroom m +WHERE houses.bedroom = m.bedroom +ORDER BY id; +</pre> Result: <pre class="result"> + id | tax | bedroom | bath | price | size | lot | predict | residual +----+------+---------+------+--------+------+-------+------------------+------------------- + 1 | 590 | 2 | 1 | 50000 | 770 | 22100 | 43223.5393423978 | 6776.46065760222 + 2 | 1050 | 3 | 2 | 85000 | 1410 | 12000 | 111527.609949684 | -26527.609949684 + 3 | 20 | 3 | 1 | 22500 | 1060 | 3500 | 20187.9052986341 | 2312.09470136587 + 4 | 870 | 2 | 2 | 90000 | 1300 | 17500 | 99354.9203362612 | -9354.92033626116 + 5 | 1320 | 3 | 2 | 133000 | 1500 | 30000 | 124508.080626412 | 8491.91937358756 + 6 | 1350 | 2 | 1 | 90500 | 820 | 25700 | 96640.8258367579 | -6140.8258367579 + 7 | 2790 | 3 | 2.5 | 260000 | 2130 | 25000 | 224650.799707327 | 35349.2002926733 + 8 | 680 | 2 | 1 | 142500 | 1170 | 22000 | 138458.174652714 | 4041.82534728572 + 9 | 1840 | 3 | 2 | 160000 | 1500 | 19000 | 138650.335313722 | 21349.6646862777 + 10 | 3680 | 4 | 2 | 240000 | 2790 | 20000 | 240000 | 0 + 11 | 1660 | 3 | 1 | 87000 | 1030 | 17500 | 62911.2752186594 | 24088.7247813406 + 12 | 1620 | 3 | 2 | 118600 | 1250 | 20000 | 117007.693446414 | 1592.30655358579 + 13 | 3100 | 3 | 2 | 140000 | 1760 | 38000 | 189203.861766403 | -49203.8617664034 + 14 | 2070 | 2 | 3 | 148000 | 1550 | 14000 | 143322.539831869 | 4677.46016813093 + 15 | 650 | 3 | 1.5 | 65000 | 1450 | 12000 | 82452.4386727394 | -17452.4386727394 +(15 rows) +</pre></li> +</ol> +</dd></dl> +<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Note</dt><dd>All table names can be optionally schema qualified (current_schemas() would be searched if a schema name is not provided) and all table and column names should follow case-sensitivity and quoting rules per the database. (For instance, 'mytable' and 'MyTable' both resolve to the same entity, i.e. 'mytable'. If mixed-case or multi-byte characters are desired for entity names then the string should be double-quoted; in this case the input would be '"MyTable"').</dd></dl> +<p><a class="anchor" id="background"></a></p><dl class="section user"><dt>Technical Background</dt><dd></dd></dl> +<p>Ordinary least-squares (OLS) linear regression refers to a stochastic model in which the conditional mean of the dependent variable (usually denoted \( Y \)) is an affine function of the vector of independent variables (usually denoted \( \boldsymbol x \)). That is, </p><p class="formulaDsp"> +\[ E[Y \mid \boldsymbol x] = \boldsymbol c^T \boldsymbol x \] +</p> +<p> for some unknown vector of coefficients \( \boldsymbol c \). The assumption is that the residuals are i.i.d. distributed Gaussians. That is, the (conditional) probability density of \( Y \) is given by </p><p class="formulaDsp"> +\[ f(y \mid \boldsymbol x) = \frac{1}{\sqrt{2 \pi \sigma^2}} \cdot \exp\left(-\frac{1}{2 \sigma^2} \cdot (y - \boldsymbol x^T \boldsymbol c)^2 \right) \,. 
\] +</p> +<p> OLS linear regression finds the vector of coefficients \( \boldsymbol c \) that maximizes the likelihood of the observations.</p> +<p>Let</p><ul> +<li>\( \boldsymbol y \in \mathbf R^n \) denote the vector of the \( n \) observed values of the dependent variable,</li> +<li>\( X \in \mathbf R^{n \times k} \) denote the design matrix with \( k \) columns and \( n \) rows, containing all observed vectors of independent variables \( \boldsymbol x_i \) as rows,</li> +<li>\( X^T \) denote the transpose of \( X \),</li> +<li>\( X^+ \) denote the pseudo-inverse of \( X \).</li> +</ul> +<p>Maximizing the likelihood is equivalent to maximizing the log-likelihood \( \sum_{i=1}^n \log f(y_i \mid \boldsymbol x_i) \), which simplifies to minimizing the <b>residual sum of squares</b> \( RSS \) (also called sum of squared residuals or sum of squared errors of prediction), </p><p class="formulaDsp"> +\[ RSS = \sum_{i=1}^n ( y_i - \boldsymbol c^T \boldsymbol x_i )^2 = (\boldsymbol y - X \boldsymbol c)^T (\boldsymbol y - X \boldsymbol c) \,. \] +</p> +<p> The first-order conditions (setting the gradient \( \nabla_{\boldsymbol c} RSS = -2 X^T (\boldsymbol y - X \boldsymbol c) \) to zero, which gives the normal equations \( X^T X \boldsymbol c = X^T \boldsymbol y \)) yield that the \( RSS \) is minimized at </p><p class="formulaDsp"> +\[ \boldsymbol c = (X^T X)^+ X^T \boldsymbol y \,. \] +</p> +<p>Computing the <b>total sum of squares</b> \( TSS \), the <b>explained sum of squares</b> \( ESS \) (also called the regression sum of squares), and the <b>coefficient of determination</b> \( R^2 \) is done according to the following formulas: </p><p class="formulaDsp"> +\begin{align*} ESS & = \boldsymbol y^T X \boldsymbol c - \frac{ \| y \|_1^2 }{n} \\ TSS & = \sum_{i=1}^n y_i^2 - \frac{ \| y \|_1^2 }{n} \\ R^2 & = \frac{ESS}{TSS} \end{align*} +</p> +<p> Note: The last equality follows from the definition \( R^2 = 1 - \frac{RSS}{TSS} \) and the fact that for linear regression \( TSS = RSS + ESS \). A proof of the latter can be found, e.g., at: <a href="http://en.wikipedia.org/wiki/Sum_of_squares">http://en.wikipedia.org/wiki/Sum_of_squares</a></p> +<p>We estimate the variance \( Var[Y - \boldsymbol c^T \boldsymbol x \mid \boldsymbol x] \) as </p><p class="formulaDsp"> +\[ \sigma^2 = \frac{RSS}{n - k} \] +</p> +<p> and compute the t-statistic for coefficient \( i \) as </p><p class="formulaDsp"> +\[ t_i = \frac{c_i}{\sqrt{\sigma^2 \cdot \left( (X^T X)^{-1} \right)_{ii} }} \,. \] +</p> +<p>The \( p \)-value for coefficient \( i \) gives the probability of seeing a value at least as extreme as the one observed, provided that the null hypothesis ( \( c_i = 0 \)) is true. Letting \( F_\nu \) denote the cumulative distribution function of the Student-t distribution with \( \nu \) degrees of freedom, the \( p \)-value for coefficient \( i \) is therefore </p><p class="formulaDsp"> +\[ p_i = \Pr(|T| \geq |t_i|) = 2 \cdot (1 - F_{n - k}( |t_i| )) \] +</p> +<p> where \( T \) is a Student-t distributed random variable with mean 0.</p> +<p>The condition number [2] \( \kappa(X) = \|X\|_2\cdot\|X^{-1}\|_2\) is computed as the product of two spectral norms [3]. The spectral norm of a matrix \(X\) is the largest singular value of \(X\), i.e., the square root of the largest eigenvalue of the positive-semidefinite matrix \(X^{*}X\):</p> +<p class="formulaDsp"> +\[ \|X\|_2 = \sqrt{\lambda_{\max}\left(X^{*}X\right)}\ , \] +</p> +<p> where \(X^{*}\) is the conjugate transpose of \(X\). The condition number of a linear regression problem is a worst-case measure of how sensitive the result is to small perturbations of the input. A large condition number (say, more than 1000) indicates the presence of significant multicollinearity.</p>
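 +<p>As a quick, hedged sanity check (reusing the houses_linregr model from the Examples above; the threshold of 1000 is only a rule of thumb): </p><pre class="example"> +-- Flag models whose condition number suggests multicollinearity +SELECT condition_no, + condition_no > 1000 AS possible_multicollinearity +FROM houses_linregr; +</pre>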
 +<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl> +<p>[1] Cosma Shalizi: Statistics 36-350: Data Mining, Lecture Notes, 21 October 2009, <a href="http://www.stat.cmu.edu/~cshalizi/350/lectures/17/lecture-17.pdf">http://www.stat.cmu.edu/~cshalizi/350/lectures/17/lecture-17.pdf</a></p> +<p>[2] Wikipedia: Condition Number, <a href="http://en.wikipedia.org/wiki/Condition_number">http://en.wikipedia.org/wiki/Condition_number</a>.</p> +<p>[3] Wikipedia: Spectral Norm, <a href="http://en.wikipedia.org/wiki/Spectral_norm#Spectral_norm">http://en.wikipedia.org/wiki/Spectral_norm#Spectral_norm</a></p> +<p>[4] Wikipedia: Breusch–Pagan test, <a href="http://en.wikipedia.org/wiki/Breusch%E2%80%93Pagan_test">http://en.wikipedia.org/wiki/Breusch%E2%80%93Pagan_test</a></p> +<p>[5] Wikipedia: Heteroscedasticity-consistent standard errors, <a href="http://en.wikipedia.org/wiki/Heteroscedasticity-consistent_standard_errors">http://en.wikipedia.org/wiki/Heteroscedasticity-consistent_standard_errors</a></p> +<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl> +<p><a class="el" href="group__grp__robust.html">Robust Variance</a></p> +<p><a class="el" href="group__grp__clustered__errors.html">Clustered Variance</a></p> +<p><a class="el" href="group__grp__validation.html">Cross Validation</a></p> +<p>File <a class="el" href="linear_8sql__in.html" title="SQL functions for linear regression. ">linear.sql_in</a>, source file for the SQL functions</p> +</div><!-- contents --> +</div><!-- doc-content --> +<!-- start footer part --> +<div id="nav-path" class="navpath"><!-- id is needed for treeview function! --> + <ul> + <li class="footer">Generated on Mon Oct 15 2018 11:24:30 for MADlib by + <a href="http://www.doxygen.org/index.html"> + <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.14 </li> + </ul> +</div> +</body> +</html>
http://git-wip-us.apache.org/repos/asf/madlib-site/blob/af0e5f14/docs/v1.15.1/group__grp__lmf.html ---------------------------------------------------------------------- diff --git a/docs/v1.15.1/group__grp__lmf.html b/docs/v1.15.1/group__grp__lmf.html new file mode 100644 index 0000000..bf17de8 --- /dev/null +++ b/docs/v1.15.1/group__grp__lmf.html @@ -0,0 +1,284 @@ +<!-- HTML header for doxygen 1.8.4--> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> +<meta http-equiv="X-UA-Compatible" content="IE=9"/> +<meta name="generator" content="Doxygen 1.8.14"/> +<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/> +<title>MADlib: Low-Rank Matrix Factorization</title> +<link href="tabs.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="jquery.js"></script> +<script type="text/javascript" src="dynsections.js"></script> +<link href="navtree.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="resize.js"></script> +<script type="text/javascript" src="navtreedata.js"></script> +<script type="text/javascript" src="navtree.js"></script> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ + $(document).ready(initResizable); +/* @license-end */</script> +<link href="search/search.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="search/searchdata.js"></script> +<script type="text/javascript" src="search/search.js"></script> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ + $(document).ready(function() { init_search(); }); +/* @license-end */ +</script> +<script type="text/x-mathjax-config"> + MathJax.Hub.Config({ + extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"], + jax: ["input/TeX","output/HTML-CSS"], +}); +</script><script type="text/javascript" async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/MathJax.js"></script> +<!-- hack in the navigation tree --> +<script type="text/javascript" src="eigen_navtree_hacks.js"></script> +<link href="doxygen.css" rel="stylesheet" type="text/css" /> +<link href="madlib_extra.css" rel="stylesheet" type="text/css"/> +<!-- google analytics --> +<script> + (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ + (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), + m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) + })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); + ga('create', 'UA-45382226-1', 'madlib.apache.org'); + ga('send', 'pageview'); +</script> +</head> +<body> +<div id="top"><!-- do not remove this div, it is closed by doxygen! 
--> +<div id="titlearea"> +<table cellspacing="0" cellpadding="0"> + <tbody> + <tr style="height: 56px;"> + <td id="projectlogo"><a href="http://madlib.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td> + <td style="padding-left: 0.5em;"> + <div id="projectname"> + <span id="projectnumber">1.15.1</span> + </div> + <div id="projectbrief">User Documentation for Apache MADlib</div> + </td> + <td> <div id="MSearchBox" class="MSearchBoxInactive"> + <span class="left"> + <img id="MSearchSelect" src="search/mag_sel.png" + onmouseover="return searchBox.OnSearchSelectShow()" + onmouseout="return searchBox.OnSearchSelectHide()" + alt=""/> + <input type="text" id="MSearchField" value="Search" accesskey="S" + onfocus="searchBox.OnSearchFieldFocus(true)" + onblur="searchBox.OnSearchFieldFocus(false)" + onkeyup="searchBox.OnSearchFieldChange(event)"/> + </span><span class="right"> + <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> + </span> + </div> +</td> + </tr> + </tbody> +</table> +</div> +<!-- end header part --> +<!-- Generated by Doxygen 1.8.14 --> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ +var searchBox = new SearchBox("searchBox", "search",false,'Search'); +/* @license-end */ +</script> +</div><!-- top --> +<div id="side-nav" class="ui-resizable side-nav-resizable"> + <div id="nav-tree"> + <div id="nav-tree-contents"> + <div id="nav-sync" class="sync"></div> + </div> + </div> + <div id="splitbar" style="-moz-user-select:none;" + class="ui-resizable-handle"> + </div> +</div> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ +$(document).ready(function(){initNavTree('group__grp__lmf.html','');}); +/* @license-end */ +</script> +<div id="doc-content"> +<!-- window showing the filter options --> +<div id="MSearchSelectWindow" + onmouseover="return searchBox.OnSearchSelectShow()" + onmouseout="return searchBox.OnSearchSelectHide()" + onkeydown="return searchBox.OnSearchSelectKey(event)"> +</div> + +<!-- iframe showing the search results (closed by default) --> +<div id="MSearchResultsWindow"> +<iframe src="javascript:void(0)" frameborder="0" + name="MSearchResults" id="MSearchResults"> +</iframe> +</div> + +<div class="header"> + <div class="headertitle"> +<div class="title">Low-Rank Matrix Factorization<div class="ingroups"><a class="el" href="group__grp__datatrans.html">Data Types and Transformations</a> &#187; <a class="el" href="group__grp__arraysmatrix.html">Arrays and Matrices</a> &#187; <a class="el" href="group__grp__matrix__factorization.html">Matrix Factorization</a></div></div> </div> +</div><!--header--> +<div class="contents"> +<div class="toc"><b>Contents</b> <ul> +<li> +<a href="#syntax">Function Syntax</a> </li> +<li> +<a href="#examples">Examples</a> </li> +<li> +<a href="#literature">Literature</a> </li> +</ul> +</div><p>This module implements a "factor model" for representing an incomplete matrix using a low-rank approximation [1]. Mathematically, this model seeks to find matrices U and V (also referred to as factors) that, for any given incomplete matrix A, minimize:</p> +<p class="formulaDsp"> +\[ \|\boldsymbol A - \boldsymbol UV^{T} \|_2 \] +</p> +<p>subject to \(rank(\boldsymbol UV^{T}) \leq r\), where \(\|\cdot\|_2\) denotes the Frobenius norm. 
Let \(A\) be an \(m \times n\) matrix; then \(U\) will be \(m \times r\) and \(V\) will be \(n \times r\) in dimension, with \(1 \leq r \ll \min(m, n)\). This model is not intended to perform a full decomposition, nor to be used as part of a matrix inversion procedure. It has been widely used in recommendation systems (e.g., Netflix [2]) and feature selection (e.g., image processing [3]).</p> +<p><a class="anchor" id="syntax"></a></p><dl class="section user"><dt>Function Syntax</dt><dd></dd></dl> +<p>Low-rank matrix factorization of an incomplete matrix into two factors.</p> +<pre class="syntax"> +lmf_igd_run( rel_output, + rel_source, + col_row, + col_column, + col_value, + row_dim, + column_dim, + max_rank, + stepsize, + scale_factor, + num_iterations, + tolerance + ) +</pre><p> <b>Arguments</b> </p><dl class="arglist"> +<dt>rel_output </dt> +<dd><p class="startdd">TEXT. The name of the table to receive the output.</p> +<p>The output factor matrices U and V are stored in a flattened format. </p><pre>RESULT AS ( + matrix_u DOUBLE PRECISION[], + matrix_v DOUBLE PRECISION[], + rmse DOUBLE PRECISION +);</pre><p class="enddd">The features corresponding to row i are <code>matrix_u[i:i][1:r]</code>; the features corresponding to column j are <code>matrix_v[j:j][1:r]</code>. </p> +</dd> +<dt>rel_source </dt> +<dd><p class="startdd">TEXT. The name of the table containing the input data.</p> +<p>The input matrix is expected to be of the following form: </p><pre>{TABLE|VIEW} <em>input_table</em> ( + <em>row</em> INTEGER, + <em>col</em> INTEGER, + <em>value</em> DOUBLE PRECISION +)</pre><p class="enddd">Input is contained in a table that describes an incomplete matrix, with available entries specified as (row, column, value). The input matrix should be 1-based, meaning row >= 1 and col >= 1. NULL values are not expected. </p> +</dd> +<dt>col_row </dt> +<dd>TEXT. The name of the column containing the row number. </dd> +<dt>col_column </dt> +<dd>TEXT. The name of the column containing the column number. </dd> +<dt>col_value </dt> +<dd>TEXT. The name of the column containing the value at (row, col), of type DOUBLE PRECISION. </dd> +<dt>row_dim (optional) </dt> +<dd>INTEGER, default: "SELECT max(col_row) FROM rel_source". The number of rows in the matrix. </dd> +<dt>column_dim (optional) </dt> +<dd>INTEGER, default: "SELECT max(col_column) FROM rel_source". The number of columns in the matrix. </dd> +<dt>max_rank </dt> +<dd>INTEGER, default: 20. The rank of the desired approximation. </dd> +<dt>stepsize (optional) </dt> +<dd>DOUBLE PRECISION, default: 0.01. Hyper-parameter that decides how aggressive the gradient steps are. </dd> +<dt>scale_factor (optional) </dt> +<dd>DOUBLE PRECISION, default: 0.1. Hyper-parameter that decides the scale of the initial factors. </dd> +<dt>num_iterations (optional) </dt> +<dd>INTEGER, default: 10. Maximum number of iterations to perform regardless of convergence. </dd> +<dt>tolerance (optional) </dt> +<dd>DOUBLE PRECISION, default: 0.0001. Acceptable level of error in convergence. </dd> +</dl> +<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl> +<ol type="1"> +<li>Prepare an input table/view: <pre class="example"> +DROP TABLE IF EXISTS lmf_data; +CREATE TABLE lmf_data ( + row INT, + col INT, + val FLOAT8 +); +</pre></li> +<li>Populate the input table with some data. 
<pre class="example"> +INSERT INTO lmf_data VALUES (1, 1, 5.0); +INSERT INTO lmf_data VALUES (3, 100, 1.0); +INSERT INTO lmf_data VALUES (999, 10000, 2.0); +</pre></li> +<li>Call the <a class="el" href="lmf_8sql__in.html#ac1acb1f0e1f7008118f21c83546a4602" title="Low-rank matrix factorization of an incomplete matrix into two factors. ">lmf_igd_run()</a> stored procedure. <pre class="example"> +DROP TABLE IF EXISTS lmf_model; +SELECT madlib.lmf_igd_run( 'lmf_model', + 'lmf_data', + 'row', + 'col', + 'val', + 999, + 10000, + 3, + 0.1, + 2, + 10, + 1e-9 + ); +</pre> Example result (the exact result may not be the same). <pre class="result"> +NOTICE: +Finished low-rank matrix factorization using incremental gradient +DETAIL: + table : lmf_data (row, col, val) +Results: + RMSE = 0.0145966345300041 +Output: + view : SELECT * FROM lmf_model WHERE id = 1 + lmf_igd_run + ----------- + 1 + (1 row) +</pre></li> +<li>Sanity check of the result. Use the model id returned (and also indicated) by the function <a class="el" href="lmf_8sql__in.html#ac1acb1f0e1f7008118f21c83546a4602" title="Low-rank matrix factorization of an incomplete matrix into two factors. ">lmf_igd_run()</a>, assumed to be 1 here: <pre class="example"> +SELECT array_dims(matrix_u) AS u_dims, array_dims(matrix_v) AS v_dims +FROM lmf_model +WHERE id = 1; +</pre> Result: <pre class="result"> + u_dims | v_dims + --------------+---------------- + [1:999][1:3] | [1:10000][1:3] + (1 row) +</pre></li> +<li>Query the result value. <pre class="example"> +SELECT matrix_u[2:2][1:3] AS row_2_features +FROM lmf_model +WHERE id = 1; +</pre> Example output (the exact result may not be the same): <pre class="result"> + row_2_features + --------------------------------------------------------- + {{1.12030523084104,0.522217971272767,0.0264869043603539}} + (1 row) +</pre></li> +<li>Predict a missing entry (row=2, col=7654). <pre class="example"> +SELECT madlib.array_dot( + matrix_u[2:2][1:3], + matrix_v[7654:7654][1:3] + ) AS row_2_col_7654 +FROM lmf_model +WHERE id = 1; +</pre> Example output (the exact result may not be the same due to the randomness of the algorithm): <pre class="result"> + row_2_col_7654 + ------------------ + 1.3201582940851 + (1 row) +</pre></li> +</ol>
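 +<p>As a final hedged check, the reported RMSE can be recomputed directly over the observed entries (a sketch, assuming the lmf_model table with id = 1 and rank 3 from the examples above): </p><pre class="example"> +-- Root-mean-square error over the observed (row, col, val) entries +SELECT sqrt(avg(power(d.val - madlib.array_dot( + m.matrix_u[d.row:d.row][1:3], + m.matrix_v[d.col:d.col][1:3]), 2))) AS rmse +FROM lmf_data d, lmf_model m +WHERE m.id = 1; +</pre> +<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl> +<p>[1] N. Srebro and T. Jaakkola. "Weighted Low-Rank Approximations." In: ICML. Ed. by T. Fawcett and N. Mishra. AAAI Press, 2003, pp. 720–727. isbn: 1-57735-189-4.</p> +<p>[2] Simon Funk, Netflix Update: Try This at Home, December 11 2006, <a href="http://sifter.org/~simon/journal/20061211.html">http://sifter.org/~simon/journal/20061211.html</a></p> +<p>[3] J. Wright, A. Ganesh, S. Rao, Y. Peng, and Y. Ma. "Robust Principal Component Analysis: Exact Recovery of Corrupted Low-Rank Matrices via Convex Optimization." In: NIPS. Ed. by Y. Bengio, D. Schuurmans, J. D. Lafferty, C. K. I. Williams, and A. Culotta. Curran Associates, Inc., 2009, pp. 2080–2088. isbn: 9781615679119. </p> +</div><!-- contents --> +</div><!-- doc-content --> +<!-- start footer part --> +<div id="nav-path" class="navpath"><!-- id is needed for treeview function! 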
--> + <ul> + <li class="footer">Generated on Mon Oct 15 2018 11:24:30 for MADlib by + <a href="http://www.doxygen.org/index.html"> + <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.14 </li> + </ul> +</div> +</body> +</html> http://git-wip-us.apache.org/repos/asf/madlib-site/blob/af0e5f14/docs/v1.15.1/group__grp__logreg.html ---------------------------------------------------------------------- diff --git a/docs/v1.15.1/group__grp__logreg.html b/docs/v1.15.1/group__grp__logreg.html new file mode 100644 index 0000000..b0fd9b9 --- /dev/null +++ b/docs/v1.15.1/group__grp__logreg.html @@ -0,0 +1,507 @@ +<!-- HTML header for doxygen 1.8.4--> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> +<meta http-equiv="X-UA-Compatible" content="IE=9"/> +<meta name="generator" content="Doxygen 1.8.14"/> +<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/> +<title>MADlib: Logistic Regression</title> +<link href="tabs.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="jquery.js"></script> +<script type="text/javascript" src="dynsections.js"></script> +<link href="navtree.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="resize.js"></script> +<script type="text/javascript" src="navtreedata.js"></script> +<script type="text/javascript" src="navtree.js"></script> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ + $(document).ready(initResizable); +/* @license-end */</script> +<link href="search/search.css" rel="stylesheet" type="text/css"/> +<script type="text/javascript" src="search/searchdata.js"></script> +<script type="text/javascript" src="search/search.js"></script> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ + $(document).ready(function() { init_search(); }); +/* @license-end */ +</script> +<script type="text/x-mathjax-config"> + MathJax.Hub.Config({ + extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"], + jax: ["input/TeX","output/HTML-CSS"], +}); +</script><script type="text/javascript" async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/MathJax.js"></script> +<!-- hack in the navigation tree --> +<script type="text/javascript" src="eigen_navtree_hacks.js"></script> +<link href="doxygen.css" rel="stylesheet" type="text/css" /> +<link href="madlib_extra.css" rel="stylesheet" type="text/css"/> +<!-- google analytics --> +<script> + (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ + (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), + m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) + })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); + ga('create', 'UA-45382226-1', 'madlib.apache.org'); + ga('send', 'pageview'); +</script> +</head> +<body> +<div id="top"><!-- do not remove this div, it is closed by doxygen! 
--> +<div id="titlearea"> +<table cellspacing="0" cellpadding="0"> + <tbody> + <tr style="height: 56px;"> + <td id="projectlogo"><a href="http://madlib.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td> + <td style="padding-left: 0.5em;"> + <div id="projectname"> + <span id="projectnumber">1.15.1</span> + </div> + <div id="projectbrief">User Documentation for Apache MADlib</div> + </td> + <td> <div id="MSearchBox" class="MSearchBoxInactive"> + <span class="left"> + <img id="MSearchSelect" src="search/mag_sel.png" + onmouseover="return searchBox.OnSearchSelectShow()" + onmouseout="return searchBox.OnSearchSelectHide()" + alt=""/> + <input type="text" id="MSearchField" value="Search" accesskey="S" + onfocus="searchBox.OnSearchFieldFocus(true)" + onblur="searchBox.OnSearchFieldFocus(false)" + onkeyup="searchBox.OnSearchFieldChange(event)"/> + </span><span class="right"> + <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> + </span> + </div> +</td> + </tr> + </tbody> +</table> +</div> +<!-- end header part --> +<!-- Generated by Doxygen 1.8.14 --> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ +var searchBox = new SearchBox("searchBox", "search",false,'Search'); +/* @license-end */ +</script> +</div><!-- top --> +<div id="side-nav" class="ui-resizable side-nav-resizable"> + <div id="nav-tree"> + <div id="nav-tree-contents"> + <div id="nav-sync" class="sync"></div> + </div> + </div> + <div id="splitbar" style="-moz-user-select:none;" + class="ui-resizable-handle"> + </div> +</div> +<script type="text/javascript"> +/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt GPL-v2 */ +$(document).ready(function(){initNavTree('group__grp__logreg.html','');}); +/* @license-end */ +</script> +<div id="doc-content"> +<!-- window showing the filter options --> +<div id="MSearchSelectWindow" + onmouseover="return searchBox.OnSearchSelectShow()" + onmouseout="return searchBox.OnSearchSelectHide()" + onkeydown="return searchBox.OnSearchSelectKey(event)"> +</div> + +<!-- iframe showing the search results (closed by default) --> +<div id="MSearchResultsWindow"> +<iframe src="javascript:void(0)" frameborder="0" + name="MSearchResults" id="MSearchResults"> +</iframe> +</div> + +<div class="header"> + <div class="headertitle"> +<div class="title">Logistic Regression<div class="ingroups"><a class="el" href="group__grp__super.html">Supervised Learning</a> &#187; <a class="el" href="group__grp__regml.html">Regression Models</a></div></div> </div> +</div><!--header--> +<div class="contents"> +<div class="toc"><b>Contents</b><ul> +<li class="level1"> +<a href="#train">Training Function</a> </li> +<li class="level1"> +<a href="#predict">Prediction Function</a> </li> +<li class="level1"> +<a href="#examples">Examples</a> </li> +<li class="level1"> +<a href="#background">Technical Background</a> </li> +<li class="level1"> +<a href="#literature">Literature</a> </li> +<li class="level1"> +<a href="#related">Related Topics</a> </li> +</ul> +</div><p>Binomial logistic regression models the relationship between a dichotomous dependent variable and one or more predictor variables. The dependent variable may be a Boolean value or a categorical variable that can be represented with a Boolean expression. 
The probabilities describing the possible outcomes of a single trial are modeled, as a function of the predictor variables, using a logistic function.</p> +<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd>The logistic regression training function has the following format: <pre class="syntax"> +logregr_train( source_table, + out_table, + dependent_varname, + independent_varname, + grouping_cols, + max_iter, + optimizer, + tolerance, + verbose + ) +</pre> <b>Arguments</b> <dl class="arglist"> +<dt>source_table </dt> +<dd><p class="startdd">TEXT. Name of the table containing the training data.</p> +<p class="enddd"></p> +</dd> +<dt>out_table </dt> +<dd><p class="startdd">TEXT. Name of the generated table containing the output model.</p> +<p>The output table produced by the logistic regression training function contains the following columns:</p> +<table class="output"> +<tr> +<th><...> </th><td><p class="starttd">TEXT. Grouping columns, if provided in input. This could be multiple columns depending on the <code>grouping_cols</code> input. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>coef </th><td><p class="starttd">FLOAT8[]. Vector of the coefficients of the regression. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>log_likelihood </th><td><p class="starttd">FLOAT8. The log-likelihood \( l(\boldsymbol c) \). </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>std_err </th><td><p class="starttd">FLOAT8[]. Vector of the standard errors of the coefficients. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>z_stats </th><td><p class="starttd">FLOAT8[]. Vector of the z-statistics of the coefficients. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>p_values </th><td><p class="starttd">FLOAT8[]. Vector of the p-values of the coefficients. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>odds_ratios </th><td><p class="starttd">FLOAT8[]. The odds ratios, \( \exp(c_i) \). </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>condition_no </th><td><p class="starttd">FLOAT8. The condition number of the \(X^{*}X\) matrix. A high condition number is usually an indication that there may be some numeric instability in the result, yielding a less reliable model. A high condition number often results when there is a significant amount of collinearity in the underlying design matrix, in which case other regression techniques may be more appropriate. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_rows_processed </th><td><p class="starttd">INTEGER. The number of rows actually processed, which is equal to the total number of rows in the source table minus the number of skipped rows. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_missing_rows_skipped </th><td><p class="starttd">INTEGER. The number of rows skipped during the training. A row will be skipped if the independent_varname is NULL or contains NULL values. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_iterations </th><td><p class="starttd">INTEGER. The number of iterations actually completed. This would be different from the <code>max_iter</code> argument if a <code>tolerance</code> parameter is provided and the algorithm converges before all iterations are completed. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>variance_covariance </th><td>FLOAT8[]. Variance/covariance matrix. 
</td></tr> +</table> +<p>A summary table named <out_table>_summary is also created at the same time, which has the following columns: </p><table class="output"> +<tr> +<th>method </th><td><p class="starttd">'logregr' for logistic regression. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>source_table </th><td><p class="starttd">The data source table name. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>out_table </th><td><p class="starttd">The output table name. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>dependent_varname </th><td><p class="starttd">The dependent variable name. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>independent_varname </th><td><p class="starttd">The independent variable names. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>optimizer_params </th><td><p class="starttd">A string that contains all the optimizer parameters, in the form 'optimizer=..., max_iter=..., tolerance=...' </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_all_groups </th><td><p class="starttd">How many groups of data were fit by the logistic model. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_failed_groups </th><td><p class="starttd">How many groups failed in training. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_rows_processed </th><td><p class="starttd">The total number of rows used in the computation. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>num_missing_rows_skipped </th><td><p class="starttd">The total number of rows skipped. </p> +<p class="endtd"></p> +</td></tr> +<tr> +<th>grouping_cols </th><td>Names of the grouping columns. </td></tr> +</table> +<p class="enddd"></p> +</dd> +<dt>dependent_varname </dt> +<dd><p class="startdd">TEXT. Name of the dependent variable column (of type BOOLEAN) in the training data, or an expression evaluating to a BOOLEAN.</p> +<p class="enddd"></p> +</dd> +<dt>independent_varname </dt> +<dd><p class="startdd">TEXT. Expression list to evaluate for the independent variables. An intercept variable is not assumed, so it is common to provide an explicit intercept term by including a single constant <code>1</code> term in the independent variable list.</p> +<p class="enddd"></p> +</dd> +<dt>grouping_cols (optional) </dt> +<dd><p class="startdd">TEXT, default: NULL. An expression list used to group the input dataset into discrete groups, running one regression per group. Similar to the SQL "GROUP BY" clause. When this value is NULL, no grouping is used and a single model is generated for the whole data set.</p> +<p class="enddd"></p> +</dd> +<dt>max_iter (optional) </dt> +<dd><p class="startdd">INTEGER, default: 20. The maximum number of iterations allowed.</p> +<p class="enddd"></p> +</dd> +<dt>optimizer (optional) </dt> +<dd><p class="startdd">TEXT, default: 'irls'. The name of the optimizer to use: </p><table class="output"> +<tr> +<th>'newton' or 'irls' </th><td>Iteratively reweighted least squares </td></tr> +<tr> +<th>'cg' </th><td>Conjugate gradient </td></tr> +<tr> +<th>'igd' </th><td>Incremental gradient descent </td></tr> +</table> +<p class="enddd"></p> +</dd> +<dt>tolerance (optional) </dt> +<dd><p class="startdd">FLOAT8, default: 0.0001. The difference between log-likelihood values in successive iterations that indicates convergence. A value of zero disables the convergence criterion, so that execution stops only after the maximum number of iterations set in the 'max_iter' parameter above.</p> +<p class="enddd"></p> +</dd> +<dt>verbose (optional) </dt> +<dd>BOOLEAN, default: FALSE. 
Provides verbose output of the results of training. </dd> +</dl> +</dd></dl> +<dl class="section note"><dt>Note</dt><dd>For p-values, the computed value is returned directly. Other statistical packages, such as R, compute the same values but apply a formatting function when printing: any p-value smaller than the machine epsilon (the smallest positive floating-point number 'x' such that '1 + x != 1') is displayed as "< xxx", where xxx is the value of the machine epsilon. Although the printed results may look different, the underlying values are the same.</dd></dl> +<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Function</dt><dd>Two prediction functions are provided. One predicts the Boolean value of the dependent variable, and the other predicts the probability of the dependent variable being 'True'; the Boolean prediction is TRUE exactly when this probability exceeds 0.5. Syntax is the same for both functions.</dd></dl> +<p>The function to predict the Boolean value (True/False) of the dependent variable has the following syntax: </p><pre class="syntax"> +logregr_predict(coefficients, + ind_var + ) +</pre><p>The function to predict the probability of the dependent variable being 'True' has the following syntax: </p><pre class="syntax"> +logregr_predict_prob(coefficients, + ind_var + ) +</pre><p><b>Arguments</b> </p><dl class="arglist"> +<dt>coefficients </dt> +<dd><p class="startdd">DOUBLE PRECISION[]. Model coefficients obtained from training <a class="el" href="logistic_8sql__in.html#a74210a7ef513dfcbdfdd9f3b37bfe428">logregr_train()</a>.</p> +<p class="enddd"></p> +</dd> +<dt>ind_var </dt> +<dd>DOUBLE PRECISION[]. Independent variables, expressed as an array. This array should have the same length as the array obtained by evaluation of the 'independent_varname' argument in <a class="el" href="logistic_8sql__in.html#a74210a7ef513dfcbdfdd9f3b37bfe428">logregr_train()</a>. </dd> +</dl> +<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd><ol type="1"> +<li>Create the training data table. This data set is related to predicting a second heart attack given treatment and health factors. <pre class="example"> +DROP TABLE IF EXISTS patients; +CREATE TABLE patients( id INTEGER NOT NULL, + second_attack INTEGER, + treatment INTEGER, + trait_anxiety INTEGER); +INSERT INTO patients VALUES +(1, 1, 1, 70), +(2, 1, 1, 80), +(3, 1, 1, 50), +(4, 1, 0, 60), +(5, 1, 0, 40), +(6, 1, 0, 65), +(7, 1, 0, 75), +(8, 1, 0, 80), +(9, 1, 0, 70), +(10, 1, 0, 60), +(11, 0, 1, 65), +(12, 0, 1, 50), +(13, 0, 1, 45), +(14, 0, 1, 35), +(15, 0, 1, 40), +(16, 0, 1, 50), +(17, 0, 0, 55), +(18, 0, 0, 45), +(19, 0, 0, 50), +(20, 0, 0, 60); +</pre></li> +<li>Train a regression model. <pre class="example"> +DROP TABLE IF EXISTS patients_logregr, patients_logregr_summary; +SELECT madlib.logregr_train( 'patients', -- Source table + 'patients_logregr', -- Output table + 'second_attack', -- Dependent variable + 'ARRAY[1, treatment, trait_anxiety]', -- Feature vector + NULL, -- Grouping + 20, -- Max iterations + 'irls' -- Optimizer to use + ); +</pre> Note that in the example above we are dynamically creating the array of independent variables from column names. If you have large numbers of independent variables beyond the PostgreSQL limit of maximum columns per table, you would typically pre-build the arrays and store them in a single column.</li> +<li>View the regression results. 
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd><ol type="1">
+<li>Create the training data table. This data set relates to predicting a second heart attack given treatment and health factors. <pre class="example">
+DROP TABLE IF EXISTS patients;
+CREATE TABLE patients( id INTEGER NOT NULL,
+                       second_attack INTEGER,
+                       treatment INTEGER,
+                       trait_anxiety INTEGER);
+INSERT INTO patients VALUES
+(1, 1, 1, 70),
+(2, 1, 1, 80),
+(3, 1, 1, 50),
+(4, 1, 0, 60),
+(5, 1, 0, 40),
+(6, 1, 0, 65),
+(7, 1, 0, 75),
+(8, 1, 0, 80),
+(9, 1, 0, 70),
+(10, 1, 0, 60),
+(11, 0, 1, 65),
+(12, 0, 1, 50),
+(13, 0, 1, 45),
+(14, 0, 1, 35),
+(15, 0, 1, 40),
+(16, 0, 1, 50),
+(17, 0, 0, 55),
+(18, 0, 0, 45),
+(19, 0, 0, 50),
+(20, 0, 0, 60);
+</pre></li>
+<li>Train a logistic regression model. <pre class="example">
+DROP TABLE IF EXISTS patients_logregr, patients_logregr_summary;
+SELECT madlib.logregr_train( 'patients',                            -- Source table
+                             'patients_logregr',                    -- Output table
+                             'second_attack',                       -- Dependent variable
+                             'ARRAY[1, treatment, trait_anxiety]',  -- Feature vector
+                             NULL,                                  -- Grouping
+                             20,                                    -- Max iterations
+                             'irls'                                 -- Optimizer to use
+                           );
+</pre> Note that in the example above we dynamically create the array of independent variables from column names. If the number of independent variables exceeds the PostgreSQL limit on the maximum number of columns per table, you would typically pre-build the arrays and store them in a single column.</li>
+<li>View the regression results. <pre class="example">
+-- Set extended display on for easier reading of output
+\x on
+SELECT * FROM patients_logregr;
+</pre> Result: <pre class="result">
+coef                     | {-6.36346994178192,-1.02410605239327,0.119044916668607}
+log_likelihood           | -9.41018298388876
+std_err                  | {3.21389766375099,1.17107844860319,0.0549790458269317}
+z_stats                  | {-1.97998524145757,-0.874498248699539,2.16527796868916}
+p_values                 | {0.0477051870698145,0.381846973530455,0.0303664045046183}
+odds_ratios              | {0.00172337630923221,0.359117354054956,1.12642051220895}
+condition_no             | 326.081922791575
+num_rows_processed       | 20
+num_missing_rows_skipped | 0
+num_iterations           | 5
+variance_covariance      | {{10.329138193064,-0.474304665195738,-0.171995901260057}, ...
+</pre></li>
+<li>Alternatively, unnest the arrays in the results for easier reading of output: <pre class="example">
+\x off
+SELECT unnest(array['intercept', 'treatment', 'trait_anxiety']) as attribute,
+       unnest(coef) as coefficient,
+       unnest(std_err) as standard_error,
+       unnest(z_stats) as z_stat,
+       unnest(p_values) as pvalue,
+       unnest(odds_ratios) as odds_ratio
+    FROM patients_logregr;
+</pre> Result: <pre class="result">
+   attribute   |    coefficient    |   standard_error   |       z_stat       |       pvalue       |     odds_ratio
+---------------+-------------------+--------------------+--------------------+--------------------+---------------------
+ intercept     | -6.36346994178192 | 3.21389766375099   | -1.97998524145757  | 0.0477051870698145 | 0.00172337630923221
+ treatment     | -1.02410605239327 | 1.17107844860319   | -0.874498248699539 | 0.381846973530455  | 0.359117354054956
+ trait_anxiety | 0.119044916668607 | 0.0549790458269317 | 2.16527796868916   | 0.0303664045046183 | 1.12642051220895
+(3 rows)
+</pre></li>
+<li>Predict the dependent variable using the logistic regression model. (This example uses the original data table to perform the prediction. Typically a different test dataset with the same features as the original training dataset would be used for prediction.) <pre class="example">
+\x off
+-- Display prediction value along with the original value
+SELECT p.id, madlib.logregr_predict(coef, ARRAY[1, treatment, trait_anxiety]),
+       p.second_attack::BOOLEAN
+FROM patients p, patients_logregr m
+ORDER BY p.id;
+</pre> Result: <pre class="result">
+ id | logregr_predict | second_attack
+----+-----------------+---------------
+  1 | t               | t
+  2 | t               | t
+  3 | f               | t
+  4 | t               | t
+  5 | f               | t
+  6 | t               | t
+  7 | t               | t
+  8 | t               | t
+  9 | t               | t
+ 10 | t               | t
+ 11 | t               | f
+ 12 | f               | f
+ 13 | f               | f
+ 14 | f               | f
+ 15 | f               | f
+ 16 | f               | f
+ 17 | t               | f
+ 18 | f               | f
+ 19 | f               | f
+ 20 | t               | f
+(20 rows)
+</pre> A quick accuracy check on these predictions is sketched after this list.</li>
+<li>Predict the probability of the dependent variable being TRUE. <pre class="example">
+\x off
+-- Display prediction value along with the original value
+SELECT p.id, madlib.logregr_predict_prob(coef, ARRAY[1, treatment, trait_anxiety]),
+       p.second_attack::BOOLEAN
+FROM patients p, patients_logregr m
+ORDER BY p.id;
+</pre> Result: <pre class="result">
+ id | logregr_predict_prob | second_attack
+----+----------------------+---------------
+  1 |    0.720223028941527 | t
+  2 |    0.894354902502048 | t
+  3 |    0.192269541755171 | t
+  4 |    0.685513072239347 | t
+  5 |    0.167747881508857 | t
+  6 |     0.79809810891514 | t
+  7 |    0.928568075752503 | t
+  8 |    0.959305763693571 | t
+  9 |    0.877576117431452 | t
+ 10 |    0.685513072239347 | t
+ 11 |    0.586700895943317 | f
+ 12 |    0.192269541755171 | f
+ 13 |    0.116032010632994 | f
+ 14 |   0.0383829143134982 | f
+ 15 |   0.0674976224147597 | f
+ 16 |    0.192269541755171 | f
+ 17 |    0.545870774302621 | f
+ 18 |    0.267675422387132 | f
+ 19 |    0.398618639285111 | f
+ 20 |    0.685513072239347 | f
+(20 rows)
+</pre></li>
+</ol>
+</dd></dl>
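+<p>As a quick check on the class predictions in step 5 above, the number of rows where the prediction matches the observed outcome can be counted directly. A sketch using the same tables (for the results shown above, this returns 15 of 20): </p><pre class="example">
+SELECT COUNT(*) AS num_correct
+FROM patients p, patients_logregr m
+WHERE madlib.logregr_predict(coef, ARRAY[1, treatment, trait_anxiety])
+      = p.second_attack::BOOLEAN;
+</pre>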
+<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Notes</dt><dd>All table names can be optionally schema qualified (current_schemas() is searched if a schema name is not provided), and all table and column names should follow the case-sensitivity and quoting rules of the database. (For instance, 'mytable' and 'MyTable' both resolve to the same entity, i.e. 'mytable'. If mixed-case or multi-byte characters are desired in entity names, the string should be double-quoted; in this case the input would be '"MyTable"'.)</dd></dl>
+<p><a class="anchor" id="background"></a></p><dl class="section user"><dt>Technical Background</dt><dd></dd></dl>
+<p>(Binomial) logistic regression refers to a stochastic model in which the conditional mean of the dependent dichotomous variable (usually denoted \( Y \in \{ 0,1 \} \)) is the logistic function of an affine function of the vector of independent variables (usually denoted \( \boldsymbol x \)). That is, </p><p class="formulaDsp">
+\[ E[Y \mid \boldsymbol x] = \sigma(\boldsymbol c^T \boldsymbol x) \]
+</p>
+<p> for some unknown vector of coefficients \( \boldsymbol c \) and where \( \sigma(t) = \frac{1}{1 + \exp(-t)} \) is the logistic function. Logistic regression finds the vector of coefficients \( \boldsymbol c \) that maximizes the likelihood of the observations.</p>
+<p>Let</p><ul>
+<li>\( \boldsymbol y \in \{ 0,1 \}^n \) denote the vector of the \( n \) observed values of the dependent variable,</li>
+<li>\( X \in \mathbf R^{n \times k} \) denote the design matrix with \( k \) columns and \( n \) rows, containing all observed vectors of independent variables \( \boldsymbol x_i \) as rows.</li>
+</ul>
+<p>By definition, </p><p class="formulaDsp">
+\[ P[Y = y_i | \boldsymbol x_i] = \sigma((-1)^{(1 - y_i)} \cdot \boldsymbol c^T \boldsymbol x_i) \,. \]
+</p>
+<p> Maximizing the likelihood \( \prod_{i=1}^n \Pr(Y = y_i \mid \boldsymbol x_i) \) is equivalent to maximizing the log-likelihood \( \sum_{i=1}^n \log \Pr(Y = y_i \mid \boldsymbol x_i) \), which simplifies to </p><p class="formulaDsp">
+\[ l(\boldsymbol c) = -\sum_{i=1}^n \log(1 + \exp((-1)^{(1 - y_i)} \cdot \boldsymbol c^T \boldsymbol x_i)) \,. \]
+</p>
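+<p>Differentiating this objective term by term shows that its gradient takes the familiar form </p><p class="formulaDsp">
+\[ \nabla l(\boldsymbol c) = \sum_{i=1}^n \left( y_i - \sigma(\boldsymbol c^T \boldsymbol x_i) \right) \boldsymbol x_i \,, \]
+</p>
+<p> which is the quantity that the gradient-based optimizers below drive to zero at the maximum-likelihood solution.</p>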
+<p> The Hessian of this objective is \( H = -X^T A X \) where \( A = \text{diag}(a_1, \dots, a_n) \) is the diagonal matrix with \( a_i = \sigma(\boldsymbol c^T \boldsymbol x_i) \cdot \sigma(-\boldsymbol c^T \boldsymbol x_i) \,. \) Since \( H \) is negative semidefinite, \( l(\boldsymbol c) \) is concave, and maximizing it is a convex optimization problem. There are many techniques for solving convex optimization problems. Currently, logistic regression in MADlib can use one of three algorithms:</p><ul>
+<li>Iteratively Reweighted Least Squares</li>
+<li>A conjugate-gradient approach, also known as the Fletcher-Reeves method in the literature, where we use the Hestenes-Stiefel rule for calculating the step size.</li>
+<li>Incremental gradient descent, also known as incremental gradient methods or stochastic gradient descent in the literature.</li>
+</ul>
+<p>We estimate the standard error for coefficient \( i \) as </p><p class="formulaDsp">
+\[ \mathit{se}(c_i) = \sqrt{\left( (X^T A X)^{-1} \right)_{ii}} \,. \]
+</p>
+<p> The Wald z-statistic is </p><p class="formulaDsp">
+\[ z_i = \frac{c_i}{\mathit{se}(c_i)} \,. \]
+</p>
+<p>The Wald \( p \)-value for coefficient \( i \) gives the probability (under the assumptions inherent in the Wald test) of seeing a value at least as extreme as the one observed, provided that the null hypothesis ( \( c_i = 0 \)) is true. Letting \( F \) denote the cumulative distribution function of a standard normal distribution, the Wald \( p \)-value for coefficient \( i \) is therefore </p><p class="formulaDsp">
+\[ p_i = \Pr(|Z| \geq |z_i|) = 2 \cdot (1 - F( |z_i| )) \]
+</p>
+<p> where \( Z \) is a standard normally distributed random variable.</p>
+<p>The odds ratio for coefficient \( i \) is estimated as \( \exp(c_i) \).</p>
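+<p>For instance, the odds ratios reported in the output table can be reproduced from the coefficients with an ordinary query. A sketch against the patients_logregr table from the examples above: </p><pre class="example">
+SELECT unnest(coef) AS coefficient,
+       exp(unnest(coef)) AS odds_ratio
+FROM patients_logregr;
+</pre>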
+<p>The condition number is computed as \( \kappa(X^T A X) \) during the iteration immediately <em>preceding</em> convergence (i.e., \( A \) is computed using the coefficients of the previous iteration). A large condition number (say, more than 1000) indicates the presence of significant multicollinearity.</p>
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>A selection of references pertaining to logistic regression, with some good pointers to other literature.</p>
+<p>[1] Cosma Shalizi: Statistics 36-350: Data Mining, Lecture Notes, 18 November 2009, <a href="http://www.stat.cmu.edu/~cshalizi/350/lectures/26/lecture-26.pdf">http://www.stat.cmu.edu/~cshalizi/350/lectures/26/lecture-26.pdf</a></p>
+<p>[2] Thomas P. Minka: A comparison of numerical optimizers for logistic regression, 2003 (revised Mar 26, 2007), <a href="http://research.microsoft.com/en-us/um/people/minka/papers/logreg/minka-logreg.pdf">http://research.microsoft.com/en-us/um/people/minka/papers/logreg/minka-logreg.pdf</a></p>
+<p>[3] Paul Komarek, Andrew W. Moore: Making Logistic Regression A Core Data Mining Tool With TR-IRLS, IEEE International Conference on Data Mining 2005, pp. 685-688, <a href="http://komarix.org/ac/papers/tr-irls.short.pdf">http://komarix.org/ac/papers/tr-irls.short.pdf</a></p>
+<p>[4] D. P. Bertsekas: Incremental gradient, subgradient, and proximal methods for convex optimization: a survey, Technical report, Laboratory for Information and Decision Systems, 2010, <a href="http://web.mit.edu/dimitrib/www/Incremental_Survey_LIDS.pdf">http://web.mit.edu/dimitrib/www/Incremental_Survey_LIDS.pdf</a></p>
+<p>[5] A. Nemirovski, A. Juditsky, G. Lan, and A. Shapiro: Robust stochastic approximation approach to stochastic programming, SIAM Journal on Optimization, 19(4), 2009, <a href="http://www2.isye.gatech.edu/~nemirovs/SIOPT_RSA_2009.pdf">http://www2.isye.gatech.edu/~nemirovs/SIOPT_RSA_2009.pdf</a></p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
+<p>File <a class="el" href="logistic_8sql__in.html" title="SQL functions for logistic regression. ">logistic.sql_in</a> documenting the training function</p>
+<p><a class="el" href="logistic_8sql__in.html#a74210a7ef513dfcbdfdd9f3b37bfe428" title="Compute logistic-regression coefficients and diagnostic statistics. ">logregr_train()</a></p>
+<p><a class="el" href="elastic__net_8sql__in.html#a735038a5090c112505c740a90a203e83" title="Interface for elastic net. ">elastic_net_train()</a></p>
+<p><a class="el" href="group__grp__linreg.html">Linear Regression</a></p>
+<p><a class="el" href="group__grp__multinom.html">Multinomial Regression</a></p>
+<p><a class="el" href="group__grp__ordinal.html">Ordinal Regression</a></p>
+<p><a class="el" href="group__grp__robust.html">Robust Variance</a></p>
+<p><a class="el" href="group__grp__clustered__errors.html">Clustered Variance</a></p>
+<p><a class="el" href="group__grp__validation.html">Cross Validation</a></p>
+<p><a class="el" href="group__grp__marginal.html">Marginal Effects</a></p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Oct 15 2018 11:24:30 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.14 </li>
+  </ul>
+</div>
+</body>
+</html>