Github user yanboliang commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8067#discussion_r39142512
  
    --- Diff: python/pyspark/ml/classification.py ---
    @@ -808,6 +809,118 @@ def theta(self):
             return self._call_java("theta")
     
     
    +@inherit_doc
    +class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
    +                                     HasMaxIter, HasTol, HasSeed):
    +    """
    +    Classifier trainer based on the Multilayer Perceptron.
    +    Each layer has a sigmoid activation function; the output layer uses softmax.
    +    The number of inputs has to be equal to the size of the feature vectors.
    +    The number of outputs has to be equal to the total number of labels.
    +
    +    >>> from pyspark.sql import Row
    +    >>> from pyspark.mllib.linalg import Vectors
    +    >>> df = sc.parallelize([
    +    ...     Row(label=0.0, features=Vectors.dense([0.0, 0.0])),
    +    ...     Row(label=1.0, features=Vectors.dense([0.0, 1.0])),
    +    ...     Row(label=1.0, features=Vectors.dense([1.0, 0.0])),
    +    ...     Row(label=0.0, features=Vectors.dense([1.0, 1.0]))]).toDF()
    +    >>> myLayers = [2, 5, 2]
    +    >>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=myLayers, blockSize=1, seed=11)
    +    >>> model = mlp.fit(df)
    +    >>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
    +    >>> model.transform(test0).head().prediction
    +    1.0
    +    >>> test1 = sc.parallelize([Row(features=Vectors.dense([0.0, 0.0]))]).toDF()
    +    >>> model.transform(test1).head().prediction
    +    0.0
    +    """
    +
    +    # a placeholder to make it appear in the generated doc
    +    layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer. " +
    +                   "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
    +                   "neurons and output layer of 10 neurons, default is [1, 1].")
    +    blockSize = Param(Params._dummy(), "blockSize", "Block size for stacking input data in " +
    +                      "matrices. Data is stacked within partitions. If block size is more than " +
    +                      "remaining data in a partition then it is adjusted to the size of this " +
    +                      "data. Recommended size is between 10 and 1000, default is 128.")
    +
    +    @keyword_only
    +    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
    +                 maxIter=100, tol=1e-4, seed=None, layers=[1, 1], blockSize=128):
    +        """
    +        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
    +                 maxIter=100, tol=1e-4, seed=None, layers=[1, 1], blockSize=128)
    +        """
    +        super(MultilayerPerceptronClassifier, self).__init__()
    +        self._java_obj = self._new_java_obj(
    +            "org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
    +        self.layers = Param(self, "layers", "Sizes of layers from input layer to output layer. " +
    +                            "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with " +
    +                            "100 neurons and output layer of 10 neurons, default is [1, 1].")
    +        self.blockSize = Param(self, "blockSize", "Block size for stacking input data in " +
    +                               "matrices. Data is stacked within partitions. If block size is " +
    +                               "more than remaining data in a partition then it is adjusted to " +
    +                               "the size of this data. Recommended size is between 10 and 1000, " +
    +                               "default is 128.")
    +        self._setDefault(maxIter=100, tol=1E-4, layers=[1, 1], blockSize=128)
    +        kwargs = self.__init__._input_kwargs
    +        self.setParams(**kwargs)
    +
    +    @keyword_only
    +    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
    +                  maxIter=100, tol=1e-4, seed=None, layers=[1, 1], blockSize=128):
    +        """
    +        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
    +                  maxIter=100, tol=1e-4, seed=None, layers=[1, 1], blockSize=128)
    +        Sets params for MultilayerPerceptronClassifier.
    +        """
    +        kwargs = self.setParams._input_kwargs
    +        return self._set(**kwargs)
    +
    +    def _create_model(self, java_model):
    +        return MultilayerPerceptronClassificationModel(java_model)
    +
    +    def setLayers(self, value):
    +        """
    +        Sets the value of :py:attr:`layers`.
    +        """
    +        self._paramMap[self.layers] = value
    +        return self
    +
    +    def getLayers(self):
    +        """
    +        Gets the value of layers or its default value.
    +        """
    +        return self.getOrDefault(self.layers)
    +
    +    def setBlockSize(self, value):
    +        """
    +        Sets the value of :py:attr:`blockSize`.
    +        """
    +        self._paramMap[self.blockSize] = value
    +        return self
    +
    +    def getBlockSize(self):
    +        """
    +        Gets the value of blockSize or its default value.
    +        """
    +        return self.getOrDefault(self.blockSize)
    +
    +
    +class MultilayerPerceptronClassificationModel(JavaModel):
    +    """
    +    Model fitted by MultilayerPerceptronClassifier.
    +    """
    +
    --- End diff ---
    
    Agree, done.
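    
    For anyone trying this out locally, a minimal usage sketch (illustrative only, not part of the diff; it assumes the branch is built and a live SparkContext `sc` is available, as in the doctest above, and exercises the setParams/getLayers/getBlockSize methods added here):
    
        from pyspark.sql import Row
        from pyspark.mllib.linalg import Vectors
        from pyspark.ml.classification import MultilayerPerceptronClassifier
    
        # Same XOR-style toy data as in the doctest: 2 features, 2 classes.
        df = sc.parallelize([
            Row(label=0.0, features=Vectors.dense([0.0, 0.0])),
            Row(label=1.0, features=Vectors.dense([0.0, 1.0])),
            Row(label=1.0, features=Vectors.dense([1.0, 0.0])),
            Row(label=0.0, features=Vectors.dense([1.0, 1.0]))]).toDF()
    
        # layers = [inputs, hidden..., outputs]; inputs must match the feature
        # vector size and outputs the number of distinct labels.
        mlp = MultilayerPerceptronClassifier()
        mlp.setParams(maxIter=100, layers=[2, 5, 2], blockSize=1, seed=11)
        print(mlp.getLayers())     # [2, 5, 2]
        print(mlp.getBlockSize())  # 1
    
        model = mlp.fit(df)
        model.transform(df).select("features", "prediction").show()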

