Github user mengxr commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8067#discussion_r39239398
  
    --- Diff: python/pyspark/ml/classification.py ---
    @@ -808,6 +809,129 @@ def theta(self):
             return self._call_java("theta")
     
     
    +@inherit_doc
    +class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, 
HasLabelCol, HasPredictionCol,
    +                                     HasMaxIter, HasTol, HasSeed):
    +    """
    +    Classifier trainer based on the Multilayer Perceptron.
    +    Each layer has sigmoid activation function, output layer has softmax.
    +    Number of inputs has to be equal to the size of feature vectors.
    +    Number of outputs has to be equal to the total number of labels.
    +
    +    >>> from pyspark.sql import Row
    +    >>> from pyspark.mllib.linalg import Vectors
    +    >>> df = sc.parallelize([
    +    ...     Row(label=0.0, features=Vectors.dense([0.0, 0.0])),
    +    ...     Row(label=1.0, features=Vectors.dense([0.0, 1.0])),
    +    ...     Row(label=1.0, features=Vectors.dense([1.0, 0.0])),
    +    ...     Row(label=0.0, features=Vectors.dense([1.0, 1.0]))]).toDF()
    +    >>> myLayers = [2, 5, 2]
    +    >>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=myLayers, 
blockSize=1, seed=11)
    +    >>> model = mlp.fit(df)
    +    >>> model.layers
    +    [2, 5, 2]
    +    >>> model.weights.size
    +    27
    +    >>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 
0.0]))]).toDF()
    +    >>> model.transform(test0).head().prediction
    +    1.0
    +    >>> test1 = sc.parallelize([Row(features=Vectors.dense([0.0, 
0.0]))]).toDF()
    +    >>> model.transform(test1).head().prediction
    +    0.0
    +    """
    +
    +    # a placeholder to make it appear in the generated doc
    +    layers = Param(Params._dummy(), "layers", "Sizes of layers from input 
layer to output layer " +
    +                   "E.g., Array(780, 100, 10) means 780 inputs, one hidden 
layer with 100 " +
    +                   "neurons and output layer of 10 neurons, default is [1, 
1].")
    +    blockSize = Param(Params._dummy(), "blockSize", "Block size for 
stacking input data in " +
    +                      "matrices. Data is stacked within partitions. If 
block size is more than " +
    +                      "remaining data in a partition then it is adjusted 
to the size of this " +
    +                      "data. Recommended size is between 10 and 1000, 
default is 128.")
    +
    +    @keyword_only
    +    def __init__(self, featuresCol="features", labelCol="label", 
predictionCol="prediction",
    +                 maxIter=100, tol=1e-4, seed=None, layers=[1, 1], 
blockSize=128):
    --- End diff --
    
    We shouldn't use a mutable value as a default argument value, since the
    default is shared across calls. Use "None" instead and substitute the
    real default inside the method body.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to