Repository: spark
Updated Branches:
  refs/heads/branch-2.0 5d495292b -> cad4693f9


[SPARK-3359][DOCS] More changes to resolve javadoc 8 errors that will help unidoc/genjavadoc compatibility

## What changes were proposed in this pull request?

These are yet more changes that resolve problems with unidoc/genjavadoc and
Java 8. They do not fully resolve the problem, but they remove as many of the
errors as we can from this end.
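
For illustration, the typical rewrite in this patch replaces Scaladoc `[[...]]` link syntax, which genjavadoc turns into Javadoc `@link` references that javadoc 8 then rejects, with plain monospace text or a bare URL. A minimal before/after sketch (the method names here are made up for illustration):

```scala
// Before: Scaladoc [[...]] link syntax, which genjavadoc converts into a Javadoc
// @link that javadoc 8 cannot resolve and reports as an error.
/** Hashes `xs`; see [[java.util.Arrays.hashCode]]. */
def hashBefore(xs: Array[Int]): Int = java.util.Arrays.hashCode(xs)

// After: plain monospace (or a bare URL for external references), which both
// Scaladoc and javadoc 8 accept.
/** Hashes `xs`; see `java.util.Arrays.hashCode`. */
def hashAfter(xs: Array[Int]): Int = java.util.Arrays.hashCode(xs)
```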

## How was this patch tested?

Jenkins build of docs

Author: Sean Owen <so...@cloudera.com>

Closes #14221 from srowen/SPARK-3359.3.

(cherry picked from commit 5ec0d692b0789a1d06db35134ee6eac2ecce47c3)
Signed-off-by: Reynold Xin <r...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/cad4693f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/cad4693f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/cad4693f

Branch: refs/heads/branch-2.0
Commit: cad4693f963fe1d1ca67803b3c3d8958797e034c
Parents: 5d49529
Author: Sean Owen <so...@cloudera.com>
Authored: Sat Jul 16 13:26:58 2016 -0700
Committer: Reynold Xin <r...@databricks.com>
Committed: Sat Jul 16 13:27:03 2016 -0700

----------------------------------------------------------------------
 .../apache/spark/graphx/util/GraphGenerators.scala  |  2 +-
 .../scala/org/apache/spark/ml/linalg/Vectors.scala  |  2 +-
 .../main/scala/org/apache/spark/ml/Pipeline.scala   |  2 +-
 .../main/scala/org/apache/spark/ml/Predictor.scala  |  2 +-
 .../apache/spark/ml/classification/Classifier.scala |  6 +++---
 .../ml/classification/DecisionTreeClassifier.scala  |  4 ++--
 .../spark/ml/classification/GBTClassifier.scala     |  8 ++++----
 .../ml/classification/LogisticRegression.scala      | 14 +++++++-------
 .../ml/classification/ProbabilisticClassifier.scala | 10 +++++-----
 .../org/apache/spark/ml/evaluation/Evaluator.scala  |  2 +-
 .../org/apache/spark/ml/feature/ChiSqSelector.scala |  4 ++--
 .../scala/org/apache/spark/ml/param/params.scala    | 13 ++++++-------
 .../org/apache/spark/ml/recommendation/ALS.scala    | 16 ++++++++--------
 .../main/scala/org/apache/spark/ml/tree/Node.scala  |  4 ++--
 .../main/scala/org/apache/spark/ml/tree/Split.scala |  4 ++--
 .../scala/org/apache/spark/ml/tree/treeModels.scala |  4 ++--
 .../apache/spark/mllib/feature/ChiSqSelector.scala  |  4 ++--
 .../scala/org/apache/spark/mllib/feature/PCA.scala  |  4 ++--
 .../apache/spark/mllib/feature/StandardScaler.scala |  5 ++---
 .../org/apache/spark/mllib/tree/impurity/Gini.scala |  4 ++--
 .../org/apache/spark/mllib/util/modelSaveLoad.scala |  2 +-
 21 files changed, 57 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
----------------------------------------------------------------------
diff --git 
a/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala 
b/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
index 4da1ecb..2b3e5f9 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
@@ -119,7 +119,7 @@ object GraphGenerators extends Logging {
    * A random graph generator using the R-MAT model, proposed in
    * "R-MAT: A Recursive Model for Graph Mining" by Chakrabarti et al.
    *
-   * See [[http://www.cs.cmu.edu/~christos/PUBLICATIONS/siam04.pdf]].
+   * See http://www.cs.cmu.edu/~christos/PUBLICATIONS/siam04.pdf.
    */
   def rmatGraph(sc: SparkContext, requestedNumVertices: Int, numEdges: Int): 
Graph[Int, Int] = {
     // let N = requestedNumVertices

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala
----------------------------------------------------------------------
diff --git 
a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala 
b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala
index c74e5d4..0659324 100644
--- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala
+++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala
@@ -66,7 +66,7 @@ sealed trait Vector extends Serializable {
 
   /**
    * Returns a hash code value for the vector. The hash code is based on its 
size and its first 128
-   * nonzero entries, using a hash algorithm similar to 
[[java.util.Arrays.hashCode]].
+   * nonzero entries, using a hash algorithm similar to 
`java.util.Arrays.hashCode`.
    */
   override def hashCode(): Int = {
     // This is a reference implementation. It calls return in foreachActive, 
which is slow.
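
The comment above describes the hashing contract: the vector's size plus its first 128 nonzero entries, combined with the 31-multiplier scheme that `java.util.Arrays.hashCode` uses. A rough standalone sketch of that idea over a plain dense array (illustrative only, not Spark's actual implementation):

```scala
// Illustrative only: fold the length and up to the first 128 nonzero entries into a
// hash, using the 31-multiplier accumulation familiar from java.util.Arrays.hashCode.
def sketchVectorHash(values: Array[Double]): Int = {
  var result = 31 + values.length
  var nonzeroSeen = 0
  var i = 0
  while (i < values.length && nonzeroSeen < 128) {
    val v = values(i)
    if (v != 0.0) {
      val bits = java.lang.Double.doubleToLongBits(v)
      result = 31 * result + i                             // mix in the index
      result = 31 * result + (bits ^ (bits >>> 32)).toInt  // and the value's bits
      nonzeroSeen += 1
    }
    i += 1
  }
  result
}
```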

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala 
b/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
index d18fb69..195a93e 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
@@ -212,7 +212,7 @@ object Pipeline extends MLReadable[Pipeline] {
     }
   }
 
-  /** Methods for [[MLReader]] and [[MLWriter]] shared between [[Pipeline]] 
and [[PipelineModel]] */
+  /** Methods for `MLReader` and `MLWriter` shared between [[Pipeline]] and 
[[PipelineModel]] */
   private[ml] object SharedReadWrite {
 
     import org.json4s.JsonDSL._

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/Predictor.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/Predictor.scala 
b/mllib/src/main/scala/org/apache/spark/ml/Predictor.scala
index 569a5fb..e29d7f4 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/Predictor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/Predictor.scala
@@ -165,7 +165,7 @@ abstract class PredictionModel[FeaturesType, M <: 
PredictionModel[FeaturesType,
   }
 
   /**
-   * Transforms dataset by reading from [[featuresCol]], calling 
[[predict()]], and storing
+   * Transforms dataset by reading from [[featuresCol]], calling `predict`, 
and storing
    * the predictions as a new column [[predictionCol]].
    *
    * @param dataset input dataset

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala 
b/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala
index e35b04a..6decea7 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala
@@ -50,7 +50,7 @@ private[spark] trait ClassifierParams
  * Single-label binary or multiclass classification.
  * Classes are indexed {0, 1, ..., numClasses - 1}.
  *
- * @tparam FeaturesType  Type of input features.  E.g., [[Vector]]
+ * @tparam FeaturesType  Type of input features.  E.g., `Vector`
  * @tparam E  Concrete Estimator type
  * @tparam M  Concrete Model type
  */
@@ -134,7 +134,7 @@ abstract class Classifier[
  * Model produced by a [[Classifier]].
  * Classes are indexed {0, 1, ..., numClasses - 1}.
  *
- * @tparam FeaturesType  Type of input features.  E.g., [[Vector]]
+ * @tparam FeaturesType  Type of input features.  E.g., `Vector`
  * @tparam M  Concrete Model type
  */
 @DeveloperApi
@@ -151,7 +151,7 @@ abstract class ClassificationModel[FeaturesType, M <: 
ClassificationModel[Featur
    * Transforms dataset by reading from [[featuresCol]], and appending new 
columns as specified by
    * parameters:
    *  - predicted labels as [[predictionCol]] of type [[Double]]
-   *  - raw predictions (confidences) as [[rawPredictionCol]] of type 
[[Vector]].
+   *  - raw predictions (confidences) as [[rawPredictionCol]] of type `Vector`.
    *
    * @param dataset input dataset
    * @return transformed dataset

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
index 082848c9..7129301 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.Dataset
 
 
 /**
- * [[http://en.wikipedia.org/wiki/Decision_tree_learning Decision tree]] 
learning algorithm
+ * Decision tree learning algorithm 
(http://en.wikipedia.org/wiki/Decision_tree_learning)
  * for classification.
  * It supports both binary and multiclass labels, as well as both continuous 
and categorical
  * features.
@@ -135,7 +135,7 @@ object DecisionTreeClassifier extends 
DefaultParamsReadable[DecisionTreeClassifi
 }
 
 /**
- * [[http://en.wikipedia.org/wiki/Decision_tree_learning Decision tree]] model 
for classification.
+ * Decision tree model (http://en.wikipedia.org/wiki/Decision_tree_learning) 
for classification.
  * It supports both binary and multiclass labels, as well as both continuous 
and categorical
  * features.
  */

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala 
b/mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala
index 5946a12..ba70293 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala
@@ -40,7 +40,7 @@ import org.apache.spark.sql.functions._
 import org.apache.spark.sql.types.DoubleType
 
 /**
- * [[http://en.wikipedia.org/wiki/Gradient_boosting Gradient-Boosted Trees 
(GBTs)]]
+ * Gradient-Boosted Trees (GBTs) 
(http://en.wikipedia.org/wiki/Gradient_boosting)
  * learning algorithm for classification.
  * It supports binary labels, as well as both continuous and categorical 
features.
  * Note: Multiclass labels are not currently supported.
@@ -158,7 +158,7 @@ object GBTClassifier extends 
DefaultParamsReadable[GBTClassifier] {
 }
 
 /**
- * [[http://en.wikipedia.org/wiki/Gradient_boosting Gradient-Boosted Trees 
(GBTs)]]
+ * Gradient-Boosted Trees (GBTs) 
(http://en.wikipedia.org/wiki/Gradient_boosting)
  * model for classification.
  * It supports binary labels, as well as both continuous and categorical 
features.
  * Note: Multiclass labels are not currently supported.
@@ -233,8 +233,8 @@ class GBTClassificationModel private[ml](
    * The importance vector is normalized to sum to 1. This method is suggested 
by Hastie et al.
    * (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd 
Edition." 2001.)
    * and follows the implementation from scikit-learn.
-   *
-   * @see [[DecisionTreeClassificationModel.featureImportances]]
+
+   * See `DecisionTreeClassificationModel.featureImportances`
    */
   @Since("2.0.0")
   lazy val featureImportances: Vector = 
TreeEnsembleModel.featureImportances(trees, numFeatures)

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index 4bab801..1fed5fd 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -863,10 +863,10 @@ class BinaryLogisticRegressionSummary 
private[classification] (
    * Returns the receiver operating characteristic (ROC) curve,
    * which is a Dataframe having two fields (FPR, TPR)
    * with (0.0, 0.0) prepended and (1.0, 1.0) appended to it.
+   * See http://en.wikipedia.org/wiki/Receiver_operating_characteristic
    *
-   * Note: This ignores instance weights (setting all to 1.0) from 
[[LogisticRegression.weightCol]].
+   * Note: This ignores instance weights (setting all to 1.0) from 
`LogisticRegression.weightCol`.
    *       This will change in later Spark versions.
-   * @see http://en.wikipedia.org/wiki/Receiver_operating_characteristic
    */
   @Since("1.5.0")
   @transient lazy val roc: DataFrame = binaryMetrics.roc().toDF("FPR", "TPR")
@@ -874,7 +874,7 @@ class BinaryLogisticRegressionSummary 
private[classification] (
   /**
    * Computes the area under the receiver operating characteristic (ROC) curve.
    *
-   * Note: This ignores instance weights (setting all to 1.0) from 
[[LogisticRegression.weightCol]].
+   * Note: This ignores instance weights (setting all to 1.0) from 
`LogisticRegression.weightCol`.
    *       This will change in later Spark versions.
    */
   @Since("1.5.0")
@@ -884,7 +884,7 @@ class BinaryLogisticRegressionSummary 
private[classification] (
    * Returns the precision-recall curve, which is a Dataframe containing
    * two fields recall, precision with (0.0, 1.0) prepended to it.
    *
-   * Note: This ignores instance weights (setting all to 1.0) from 
[[LogisticRegression.weightCol]].
+   * Note: This ignores instance weights (setting all to 1.0) from 
`LogisticRegression.weightCol`.
    *       This will change in later Spark versions.
    */
   @Since("1.5.0")
@@ -893,7 +893,7 @@ class BinaryLogisticRegressionSummary 
private[classification] (
   /**
    * Returns a dataframe with two fields (threshold, F-Measure) curve with 
beta = 1.0.
    *
-   * Note: This ignores instance weights (setting all to 1.0) from 
[[LogisticRegression.weightCol]].
+   * Note: This ignores instance weights (setting all to 1.0) from 
`LogisticRegression.weightCol`.
    *       This will change in later Spark versions.
    */
   @Since("1.5.0")
@@ -906,7 +906,7 @@ class BinaryLogisticRegressionSummary 
private[classification] (
    * Every possible probability obtained in transforming the dataset are used
    * as thresholds used in calculating the precision.
    *
-   * Note: This ignores instance weights (setting all to 1.0) from 
[[LogisticRegression.weightCol]].
+   * Note: This ignores instance weights (setting all to 1.0) from 
`LogisticRegression.weightCol`.
    *       This will change in later Spark versions.
    */
   @Since("1.5.0")
@@ -919,7 +919,7 @@ class BinaryLogisticRegressionSummary 
private[classification] (
    * Every possible probability obtained in transforming the dataset are used
    * as thresholds used in calculating the recall.
    *
-   * Note: This ignores instance weights (setting all to 1.0) from 
[[LogisticRegression.weightCol]].
+   * Note: This ignores instance weights (setting all to 1.0) from 
`LogisticRegression.weightCol`.
    *       This will change in later Spark versions.
    */
   @Since("1.5.0")

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/classification/ProbabilisticClassifier.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/ProbabilisticClassifier.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/ProbabilisticClassifier.scala
index 59277d0..88642ab 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/ProbabilisticClassifier.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/ProbabilisticClassifier.scala
@@ -45,7 +45,7 @@ private[classification] trait ProbabilisticClassifierParams
  *
  * Single-label binary or multiclass classifier which can output class 
conditional probabilities.
  *
- * @tparam FeaturesType  Type of input features.  E.g., [[Vector]]
+ * @tparam FeaturesType  Type of input features.  E.g., `Vector`
  * @tparam E  Concrete Estimator type
  * @tparam M  Concrete Model type
  */
@@ -70,7 +70,7 @@ abstract class ProbabilisticClassifier[
  * Model produced by a [[ProbabilisticClassifier]].
  * Classes are indexed {0, 1, ..., numClasses - 1}.
  *
- * @tparam FeaturesType  Type of input features.  E.g., [[Vector]]
+ * @tparam FeaturesType  Type of input features.  E.g., `Vector`
  * @tparam M  Concrete Model type
  */
 @DeveloperApi
@@ -89,8 +89,8 @@ abstract class ProbabilisticClassificationModel[
    * Transforms dataset by reading from [[featuresCol]], and appending new 
columns as specified by
    * parameters:
    *  - predicted labels as [[predictionCol]] of type [[Double]]
-   *  - raw predictions (confidences) as [[rawPredictionCol]] of type 
[[Vector]]
-   *  - probability of each class as [[probabilityCol]] of type [[Vector]].
+   *  - raw predictions (confidences) as [[rawPredictionCol]] of type `Vector`
+   *  - probability of each class as [[probabilityCol]] of type `Vector`.
    *
    * @param dataset input dataset
    * @return transformed dataset
@@ -210,7 +210,7 @@ private[ml] object ProbabilisticClassificationModel {
   /**
    * Normalize a vector of raw predictions to be a multinomial probability 
vector, in place.
    *
-   * The input raw predictions should be >= 0.
+   * The input raw predictions should be nonnegative.
    * The output vector sums to 1, unless the input vector is all-0 (in which 
case the output is
    * all-0 too).
    *
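
The comment above fully specifies the normalization contract; a self-contained sketch of that behavior (illustrative, not the private Spark implementation, which works on a `Vector` in place) might be:

```scala
// Illustrative: scale nonnegative raw scores so they sum to 1; an all-zero input
// stays all-zero, matching the documented contract.
def sketchNormalizeToProbabilities(raw: Array[Double]): Array[Double] = {
  require(raw.forall(_ >= 0.0), "raw predictions must be nonnegative")
  val total = raw.sum
  if (total == 0.0) raw.clone() else raw.map(_ / total)
}
```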

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/evaluation/Evaluator.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/evaluation/Evaluator.scala 
b/mllib/src/main/scala/org/apache/spark/ml/evaluation/Evaluator.scala
index dfbc3e5..e7b949d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/evaluation/Evaluator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/evaluation/Evaluator.scala
@@ -53,7 +53,7 @@ abstract class Evaluator extends Params {
   def evaluate(dataset: Dataset[_]): Double
 
   /**
-   * Indicates whether the metric returned by [[evaluate()]] should be 
maximized (true, default)
+   * Indicates whether the metric returned by `evaluate` should be maximized 
(true, default)
    * or minimized (false).
    * A given evaluator may support multiple metrics which may be maximized or 
minimized.
    */

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala 
b/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
index bd053e8..1482eb3 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/ChiSqSelector.scala
@@ -42,8 +42,8 @@ private[feature] trait ChiSqSelectorParams extends Params
 
   /**
    * Number of features that selector will select (ordered by statistic value 
descending). If the
-   * number of features is < numTopFeatures, then this will select all 
features. The default value
-   * of numTopFeatures is 50.
+   * number of features is less than numTopFeatures, then this will select all 
features.
+   * The default value of numTopFeatures is 50.
    * @group param
    */
   final val numTopFeatures = new IntParam(this, "numTopFeatures",

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala 
b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
index e7780cf..9245931 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
@@ -552,7 +552,7 @@ trait Params extends Identifiable with Serializable {
    *
    * This only needs to check for interactions between parameters.
    * Parameter value checks which do not depend on other parameters are 
handled by
-   * [[Param.validate()]].  This method does not handle input/output column 
parameters;
+   * `Param.validate()`. This method does not handle input/output column 
parameters;
    * those are checked during schema validation.
    * @deprecated Will be removed in 2.1.0. All the checks should be merged 
into transformSchema
    */
@@ -580,8 +580,7 @@ trait Params extends Identifiable with Serializable {
   }
 
   /**
-   * Explains all params of this instance.
-   * @see [[explainParam()]]
+   * Explains all params of this instance. See `explainParam()`.
    */
   def explainParams(): String = {
     params.map(explainParam).mkString("\n")
@@ -678,7 +677,7 @@ trait Params extends Identifiable with Serializable {
   /**
    * Sets default values for a list of params.
    *
-   * Note: Java developers should use the single-parameter [[setDefault()]].
+   * Note: Java developers should use the single-parameter `setDefault`.
    *       Annotating this with varargs can cause compilation failures due to 
a Scala compiler bug.
    *       See SPARK-9268.
    *
@@ -712,8 +711,7 @@ trait Params extends Identifiable with Serializable {
   /**
    * Creates a copy of this instance with the same UID and some extra params.
    * Subclasses should implement this method and set the return type properly.
-   *
-   * @see [[defaultCopy()]]
+   * See `defaultCopy()`.
    */
   def copy(extra: ParamMap): Params
 
@@ -730,7 +728,8 @@ trait Params extends Identifiable with Serializable {
   /**
    * Extracts the embedded default param values and user-supplied values, and 
then merges them with
    * extra values from input into a flat param map, where the latter value is 
used if there exist
-   * conflicts, i.e., with ordering: default param values < user-supplied 
values < extra.
+   * conflicts, i.e., with ordering:
+   * default param values less than user-supplied values less than extra.
    */
   final def extractParamMap(extra: ParamMap): ParamMap = {
     defaultParamMap ++ paramMap ++ extra
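
The merge order documented for `extractParamMap` (defaults overridden by user-supplied values, which are overridden by extras) is exactly what `++` on Scala maps gives, since the right-hand operand wins on key conflicts. A plain-`Map` illustration, with hypothetical parameter names:

```scala
// Later operands of ++ win on conflicting keys, so extras override user-set values,
// which override defaults.
val defaults = Map("regParam" -> 0.0, "maxIter" -> 10.0)
val userSet  = Map("maxIter" -> 50.0)
val extras   = Map("regParam" -> 0.1)

val merged = defaults ++ userSet ++ extras
// merged == Map("regParam" -> 0.1, "maxIter" -> 50.0)
```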

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala 
b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
index a2c4c26..02e2384 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
@@ -99,7 +99,7 @@ private[recommendation] trait ALSParams extends 
ALSModelParams with HasMaxIter w
   with HasPredictionCol with HasCheckpointInterval with HasSeed {
 
   /**
-   * Param for rank of the matrix factorization (>= 1).
+   * Param for rank of the matrix factorization (positive).
    * Default: 10
    * @group param
    */
@@ -109,7 +109,7 @@ private[recommendation] trait ALSParams extends 
ALSModelParams with HasMaxIter w
   def getRank: Int = $(rank)
 
   /**
-   * Param for number of user blocks (>= 1).
+   * Param for number of user blocks (positive).
    * Default: 10
    * @group param
    */
@@ -120,7 +120,7 @@ private[recommendation] trait ALSParams extends 
ALSModelParams with HasMaxIter w
   def getNumUserBlocks: Int = $(numUserBlocks)
 
   /**
-   * Param for number of item blocks (>= 1).
+   * Param for number of item blocks (positive).
    * Default: 10
    * @group param
    */
@@ -141,7 +141,7 @@ private[recommendation] trait ALSParams extends 
ALSModelParams with HasMaxIter w
   def getImplicitPrefs: Boolean = $(implicitPrefs)
 
   /**
-   * Param for the alpha parameter in the implicit preference formulation (>= 
0).
+   * Param for the alpha parameter in the implicit preference formulation 
(nonnegative).
    * Default: 1.0
    * @group param
    */
@@ -174,7 +174,7 @@ private[recommendation] trait ALSParams extends 
ALSModelParams with HasMaxIter w
 
   /**
    * Param for StorageLevel for intermediate datasets. Pass in a string 
representation of
-   * [[StorageLevel]]. Cannot be "NONE".
+   * `StorageLevel`. Cannot be "NONE".
    * Default: "MEMORY_AND_DISK".
    *
    * @group expertParam
@@ -188,7 +188,7 @@ private[recommendation] trait ALSParams extends 
ALSModelParams with HasMaxIter w
 
   /**
    * Param for StorageLevel for ALS model factors. Pass in a string 
representation of
-   * [[StorageLevel]].
+   * `StorageLevel`.
    * Default: "MEMORY_AND_DISK".
    *
    * @group expertParam
@@ -351,11 +351,11 @@ object ALSModel extends MLReadable[ALSModel] {
  *
  * For implicit preference data, the algorithm used is based on
  * "Collaborative Filtering for Implicit Feedback Datasets", available at
- * [[http://dx.doi.org/10.1109/ICDM.2008.22]], adapted for the blocked 
approach used here.
+ * http://dx.doi.org/10.1109/ICDM.2008.22, adapted for the blocked approach 
used here.
  *
  * Essentially instead of finding the low-rank approximations to the rating 
matrix `R`,
  * this finds the approximations for a preference matrix `P` where the 
elements of `P` are 1 if
- * r > 0 and 0 if r <= 0. The ratings then act as 'confidence' values related 
to strength of
+ * r &gt; 0 and 0 if r &lt;= 0. The ratings then act as 'confidence' values 
related to strength of
  * indicated user
  * preferences rather than explicit ratings given to items.
  */
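
For the implicit-preference formulation described in that comment, the rating r is collapsed to a 0/1 preference and also drives a confidence weight. A tiny sketch of that mapping, following the implicit-feedback paper the comment cites (the helper and the 1 + alpha * |r| confidence form are illustrative, not Spark's blocked solver):

```scala
// Illustrative mapping from an implicit rating r to a preference p in {0, 1} and a
// confidence weight: p = 1 if r > 0 and 0 if r <= 0; confidence grows with |r|.
def sketchImplicitPreference(r: Double, alpha: Double): (Double, Double) = {
  val preference = if (r > 0.0) 1.0 else 0.0
  val confidence = 1.0 + alpha * math.abs(r)
  (preference, confidence)
}
```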

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala
index 8144bcb..07e98a1 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala
@@ -145,8 +145,8 @@ class LeafNode private[ml] (
  * Internal Decision Tree node.
  * @param prediction  Prediction this node would make if it were a leaf node
  * @param impurity  Impurity measure at this node (for training data)
- * @param gain Information gain value.
- *             Values < 0 indicate missing values; this quirk will be removed 
with future updates.
+ * @param gain Information gain value. Values less than 0 indicate missing 
values;
+ *             this quirk will be removed with future updates.
  * @param leftChild  Left-hand child node
  * @param rightChild  Right-hand child node
  * @param split  Information about the test used to split to the left or right 
child.

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala
index 47fe352..dff44e2 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala
@@ -151,8 +151,8 @@ class CategoricalSplit private[ml] (
 /**
  * Split which tests a continuous feature.
  * @param featureIndex  Index of the feature to test
- * @param threshold  If the feature value is <= this threshold, then the split 
goes left.
- *                    Otherwise, it goes right.
+ * @param threshold  If the feature value is less than or equal to this 
threshold, then the
+ *                   split goes left. Otherwise, it goes right.
  */
 class ContinuousSplit private[ml] (override val featureIndex: Int, val 
threshold: Double)
   extends Split {
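
The threshold rule in that comment reduces to a single comparison; as a trivial illustration (hypothetical helper, not the Spark class):

```scala
// A continuous split sends a row to the left child iff featureValue <= threshold.
def sketchGoesLeft(featureValue: Double, threshold: Double): Boolean =
  featureValue <= threshold
```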

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
index 5b6fcc5..d3cbc36 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
@@ -415,12 +415,12 @@ private[ml] object EnsembleModelReadWrite {
   /**
    * Helper method for loading a tree ensemble from disk.
    * This reconstructs all trees, returning the root nodes.
-   * @param path  Path given to [[saveImpl()]]
+   * @param path  Path given to `saveImpl`
    * @param className  Class name for ensemble model type
    * @param treeClassName  Class name for tree model type in the ensemble
    * @return  (ensemble metadata, array over trees of (tree metadata, root 
node)),
    *          where the root node is linked with all descendents
-   * @see [[saveImpl()]] for how the model was saved
+   * @see `saveImpl` for how the model was saved
    */
   def loadImpl(
       path: String,

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
index c8c2823..56fb2d3 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
@@ -173,8 +173,8 @@ object ChiSqSelectorModel extends 
Loader[ChiSqSelectorModel] {
  * Creates a ChiSquared feature selector.
  * @param numTopFeatures number of features that selector will select
  *                       (ordered by statistic value descending)
- *                       Note that if the number of features is < 
numTopFeatures, then this will
- *                       select all features.
+ *                       Note that if the number of features is less than 
numTopFeatures,
+ *                       then this will select all features.
  */
 @Since("1.3.0")
 class ChiSqSelector @Since("1.3.0") (

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
index 15b7220..aaecfa8 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
@@ -70,7 +70,7 @@ class PCA @Since("1.4.0") (@Since("1.4.0") val k: Int) {
   }
 
   /**
-   * Java-friendly version of [[fit()]]
+   * Java-friendly version of `fit()`.
    */
   @Since("1.4.0")
   def fit(sources: JavaRDD[Vector]): PCAModel = fit(sources.rdd)
@@ -91,7 +91,7 @@ class PCAModel private[spark] (
    * Transform a vector by computed Principal Components.
    *
    * @param vector vector to be transformed.
-   *               Vector must be the same length as the source vectors given 
to [[PCA.fit()]].
+   *               Vector must be the same length as the source vectors given 
to `PCA.fit()`.
    * @return transformed vector. Vector will be of length k.
    */
   @Since("1.4.0")

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
index b7d6c60..3e86c6c 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
@@ -27,9 +27,8 @@ import org.apache.spark.rdd.RDD
  * Standardizes features by removing the mean and scaling to unit std using 
column summary
  * statistics on the samples in the training set.
  *
- * The "unit std" is computed using the
- * 
[[https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation
- *   corrected sample standard deviation]],
+ * The "unit std" is computed using the corrected sample standard deviation
+ * 
(https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation),
  * which is computed as the square root of the unbiased sample variance.
  *
  * @param withMean False by default. Centers the data with mean before 
scaling. It will build a
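
The "corrected sample standard deviation" referenced above is the square root of the unbiased sample variance, i.e. squared deviations divided by n - 1 rather than n. A small sketch of that formula (illustrative helper, not Spark's summarizer):

```scala
// Bessel-corrected sample standard deviation: sqrt of the unbiased sample variance.
def sketchCorrectedStd(xs: Array[Double]): Double = {
  require(xs.length > 1, "need at least two samples for the n - 1 correction")
  val n = xs.length
  val mean = xs.sum / n
  val unbiasedVariance = xs.map(x => (x - mean) * (x - mean)).sum / (n - 1)
  math.sqrt(unbiasedVariance)
}
```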

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
index 22e7027..c5e34ff 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
@@ -20,8 +20,8 @@ package org.apache.spark.mllib.tree.impurity
 import org.apache.spark.annotation.{DeveloperApi, Since}
 
 /**
- * Class for calculating the
- * [[http://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity Gini 
impurity]]
+ * Class for calculating the Gini impurity
+ * (http://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity)
  * during multiclass classification.
  */
 @Since("1.0.0")

http://git-wip-us.apache.org/repos/asf/spark/blob/cad4693f/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
index 4d71d53..c881c8e 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
@@ -45,7 +45,7 @@ trait Saveable {
    *  - human-readable (JSON) model metadata to path/metadata/
    *  - Parquet formatted data to path/data/
    *
-   * The model may be loaded using [[Loader.load]].
+   * The model may be loaded using `Loader.load`.
    *
    * @param sc  Spark context used to save model data.
    * @param path  Path specifying the directory in which to save this model.

