Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/9581#discussion_r48904430
  
    --- Diff: python/pyspark/ml/param/_shared_params_code_gen.py ---
    @@ -105,44 +109,45 @@ def get$Name(self):
         print("\n# DO NOT MODIFY THIS FILE! It was generated by 
_shared_params_code_gen.py.\n")
         print("from pyspark.ml.param import Param, Params\n\n")
         shared = [
    -        ("maxIter", "max number of iterations (>= 0).", None),
    -        ("regParam", "regularization parameter (>= 0).", None),
    -        ("featuresCol", "features column name.", "'features'"),
    -        ("labelCol", "label column name.", "'label'"),
    -        ("predictionCol", "prediction column name.", "'prediction'"),
    +        ("maxIter", "max number of iterations (>= 0).", None, int),
    +        ("regParam", "regularization parameter (>= 0).", None, float),
    +        ("featuresCol", "features column name.", "'features'", None),
    +        ("labelCol", "label column name.", "'label'", str),
    +        ("predictionCol", "prediction column name.", "'prediction'", str),
             ("probabilityCol", "Column name for predicted class conditional 
probabilities. " +
              "Note: Not all models output well-calibrated probability 
estimates! These probabilities " +
    -         "should be treated as confidences, not precise probabilities.", 
"'probability'"),
    -        ("rawPredictionCol", "raw prediction (a.k.a. confidence) column 
name.", "'rawPrediction'"),
    -        ("inputCol", "input column name.", None),
    -        ("inputCols", "input column names.", None),
    -        ("outputCol", "output column name.", "self.uid + '__output'"),
    -        ("numFeatures", "number of features.", None),
    +         "should be treated as confidences, not precise probabilities.", 
"'probability'", str),
    +        ("rawPredictionCol", "raw prediction (a.k.a. confidence) column 
name.", "'rawPrediction'",
    +         str),
    +        ("inputCol", "input column name.", None, str),
    +        ("inputCols", "input column names.", None, None),
    +        ("outputCol", "output column name.", "self.uid + '__output'", 
None),
    +        ("numFeatures", "number of features.", None, int),
             ("checkpointInterval", "set checkpoint interval (>= 1) or disable 
checkpoint (-1). " +
    -         "E.g. 10 means that the cache will get checkpointed every 10 
iterations.", None),
    -        ("seed", "random seed.", "hash(type(self).__name__)"),
    -        ("tol", "the convergence tolerance for iterative algorithms.", 
None),
    -        ("stepSize", "Step size to be used for each iteration of 
optimization.", None),
    +         "E.g. 10 means that the cache will get checkpointed every 10 
iterations.", None, int),
    +        ("seed", "random seed.", "hash(type(self).__name__)", int),
    +        ("tol", "the convergence tolerance for iterative algorithms.", 
None, float),
    +        ("stepSize", "Step size to be used for each iteration of 
optimization.", None, float),
             ("handleInvalid", "how to handle invalid entries. Options are skip 
(which will filter " +
              "out rows with bad values), or error (which will throw an 
errror). More options may be " +
    -         "added later.", None),
    +         "added later.", None, bool),
    --- End diff --
    
    str


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org

Reply via email to