Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20904#discussion_r180245120
  
    --- Diff: python/pyspark/ml/stat.py ---
    @@ -127,13 +113,86 @@ class Correlation(object):
         def corr(dataset, column, method="pearson"):
             """
             Compute the correlation matrix with specified method using dataset.
    +
    +        :param dataset:
    +          A Dataset or a DataFrame.
    +        :param column:
    +          The name of the column of vectors for which the correlation coefficient needs
    +          to be computed. This must be a column of the dataset, and it must contain
    +          Vector objects.
    +        :param method:
    +          String specifying the method to use for computing correlation.
    +          Supported: `pearson` (default), `spearman`.
    +        :return:
    +          A DataFrame that contains the correlation matrix of the column of vectors. This
    +          DataFrame contains a single row and a single column of name
    +          '$METHODNAME($COLUMN)'.
             """
             sc = SparkContext._active_spark_context
             javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
             args = [_py2java(sc, arg) for arg in (dataset, column, method)]
             return _java2py(sc, javaCorrObj.corr(*args))
     
     
    +class KolmogorovSmirnovTest(object):
    +    """
    +    .. note:: Experimental
    +
    +    Conduct the two-sided Kolmogorov-Smirnov (KS) test for data sampled from a
    +    continuous distribution.
    +
    +    By comparing the largest difference between the empirical cumulative
    +    distribution of the sample data and the theoretical distribution we can
    +    provide a test for the null hypothesis that the sample data comes from
    +    that theoretical distribution.
    +
    +    >>> from pyspark.ml.stat import KolmogorovSmirnovTest
    +    >>> dataset = [[-1.0], [0.0], [1.0]]
    +    >>> dataset = spark.createDataFrame(dataset, ['sample'])
    +    >>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
    +    >>> round(ksResult.pValue, 3)
    +    1.0
    +    >>> round(ksResult.statistic, 3)
    +    0.175
    +    >>> dataset = [[2.0], [3.0], [4.0]]
    +    >>> dataset = spark.createDataFrame(dataset, ['sample'])
    +    >>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 3.0, 1.0).first()
    +    >>> round(ksResult.pValue, 3)
    +    1.0
    +    >>> round(ksResult.statistic, 3)
    +    0.175
    +
    +    .. versionadded:: 2.4.0
    +
    +    """
    +    @staticmethod
    +    @since("2.4.0")
    +    def test(dataset, sampleCol, distName, *params):
    +        """
    +        Perform a Kolmogorov-Smirnov test using dataset.
    --- End diff ---
    
    Can you please make this match the text in the Scala doc?
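    
    Also, for context, a rough usage sketch of `Correlation.corr` as documented above. This assumes an active SparkSession bound to `spark` (as in the doctests), and the column name `features` is only illustrative:
    
        from pyspark.ml.linalg import Vectors
        from pyspark.ml.stat import Correlation
    
        # The input must be a DataFrame with a column of Vector objects.
        data = [(Vectors.dense([1.0, 0.0, 3.0]),),
                (Vectors.dense([2.0, 1.0, 1.0]),),
                (Vectors.dense([4.0, 2.0, 0.0]),)]
        df = spark.createDataFrame(data, ["features"])
    
        # Returns a one-row DataFrame whose single column is named
        # 'pearson(features)'; the cell holds the correlation Matrix.
        pearsonMatrix = Correlation.corr(df, "features").first()[0]
    
        # Spearman rank correlation is selected via the method argument.
        spearmanMatrix = Correlation.corr(df, "features", method="spearman").first()[0]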

