Github user WeichenXu123 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/18742#discussion_r131772274

    --- Diff: python/pyspark/ml/util.py ---
    @@ -61,33 +66,86 @@ def _randomUID(cls):


     @inherit_doc
    -class MLWriter(object):
    +class BaseReadWrite(object):
         """
    -    Utility class that can save ML instances.
    +    Base class for MLWriter and MLReader. Stores information about the SparkContext
    +    and SparkSession.

    -    .. versionadded:: 2.0.0
    +    .. versionadded:: 2.3.0
         """

    -    def save(self, path):
    -        """Save the ML instance to the input path."""
    -        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
    -
    -    def overwrite(self):
    -        """Overwrites if the output path already exists."""
    -        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
    +    def __init__(self):
    +        self._sparkSession = None

         def context(self, sqlContext):
             """
    -        Sets the SQL context to use for saving.
    +        Sets the Spark SQLContext to use for saving/loading.

             .. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
             """
    -        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
    +        raise NotImplementedError("Read/Write is not yet implemented for type: %s" % type(self))

         def session(self, sparkSession):
    -        """Sets the Spark Session to use for saving."""
    +        """
    +        Sets the Spark Session to use for saving/loading.
    +        """
    +        self._sparkSession = sparkSession
    +        return self
    +
    +    @property
    +    def sparkSession(self):
    +        """
    +        Returns the user-specified Spark Session or the default.
    +        """
    +        if self._sparkSession is None:
    +            self._sparkSession = SparkSession.builder.getOrCreate()
    +        return self._sparkSession
    +
    +    @property
    +    def sc(self):
    +        """
    +        Returns the underlying `SparkContext`.
    +        """
    +        return self.sparkSession.sparkContext
    +
    +
    +@inherit_doc
    +class MLWriter(BaseReadWrite):
    +    """
    +    Utility class that can save ML instances.
    +
    +    .. versionadded:: 2.0.0
    +    """
    +
    +    def __init__(self):
    +        super(MLWriter, self).__init__()
    +        self.shouldOverwrite = False
    +
    +    def _handleOverwrite(self, path):
    +        from pyspark.ml.wrapper import JavaWrapper
    +
    +        _java_obj = JavaWrapper._new_java_obj("org.apache.spark.ml.util.FileSystemOverwrite")
    +        wrapper = JavaWrapper(_java_obj)
    +        wrapper._call_java("handleOverwrite", path, True, self.sc._jsc.sc())
    --- End diff --

    Regarding the `FileSystemOverwrite` wrapper: could we abstract a more generic interface, such as `FileSystem`? It could be useful in other places in pyspark, not only for overwriting files but also for other file system operations.
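    For illustration, here is a minimal sketch of what such a generic wrapper could look
    like on the Python side. Only `JavaWrapper`, `_new_java_obj`, and `_call_java` are
    taken from the diff above; the JVM-side class name
    `org.apache.spark.ml.util.FileSystemUtils` and its `exists`/`delete` methods are
    hypothetical and would need a Scala counterpart.

        from pyspark.ml.wrapper import JavaWrapper

        class _FileSystem(JavaWrapper):
            """Generic pyspark-side handle to JVM file system helpers (sketch)."""

            def __init__(self, sc):
                # "org.apache.spark.ml.util.FileSystemUtils" is hypothetical; the PR
                # currently only provides FileSystemOverwrite.
                _java_obj = JavaWrapper._new_java_obj(
                    "org.apache.spark.ml.util.FileSystemUtils")
                super(_FileSystem, self).__init__(_java_obj)
                self._sc = sc

            def exists(self, path):
                # Hypothetical JVM-side method: FileSystemUtils.exists(path, sc)
                return self._call_java("exists", path, self._sc._jsc.sc())

            def delete(self, path, recursive=True):
                # Hypothetical JVM-side method: FileSystemUtils.delete(path, recursive, sc)
                return self._call_java("delete", path, recursive, self._sc._jsc.sc())

            def handleOverwrite(self, path, shouldOverwrite):
                # Mirrors the existing FileSystemOverwrite.handleOverwrite call above.
                return self._call_java("handleOverwrite", path, shouldOverwrite,
                                       self._sc._jsc.sc())

    With such a wrapper, `MLWriter._handleOverwrite` would reduce to something like
    `_FileSystem(self.sc).handleOverwrite(path, True)`, and other parts of pyspark could
    reuse the same class for additional file system operations.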