This is an automated email from the ASF dual-hosted git repository. gurwls223 pushed a commit to branch branch-3.0 in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.0 by this push: new e0ab022 [MINOR][DOCS] Fix some links for python api doc e0ab022 is described below commit e0ab0228a716f36e4115a1361dd45bda0fe43beb Author: Kent Yao <yaooq...@hotmail.com> AuthorDate: Thu Mar 26 13:06:21 2020 +0900 [MINOR][DOCS] Fix some links for python api doc ### What changes were proposed in this pull request? The link for `partition discovery` is malformed, because for releases the full URL will contain `/docs/<version>/`. ### Why are the changes needed? Fix the documentation. ### Does this PR introduce any user-facing change? No. ### How was this patch tested? `SKIP_SCALADOC=1 SKIP_RDOC=1 SKIP_SQLDOC=1 jekyll serve` locally verified Closes #28017 from yaooqinn/doc. Authored-by: Kent Yao <yaooq...@hotmail.com> Signed-off-by: HyukjinKwon <gurwls...@apache.org> (cherry picked from commit b024a8a69e4ae45c6ded3dd3f9f27e73a0069891) Signed-off-by: HyukjinKwon <gurwls...@apache.org> --- python/pyspark/sql/functions.py | 4 ---- python/pyspark/sql/readwriter.py | 9 ++------- python/pyspark/sql/streaming.py | 5 ++--- 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py index bbadb54..61a221c 100644 --- a/python/pyspark/sql/functions.py +++ b/python/pyspark/sql/functions.py @@ -1143,8 +1143,6 @@ def to_date(col, format=None): By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format is omitted. Equivalent to ``col.cast("date")``. - .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html - >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_date(df.t).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] @@ -1168,8 +1166,6 @@ def to_timestamp(col, format=None): By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format is omitted. Equivalent to ``col.cast("timestamp")``. - .. 
_datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html - >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_timestamp(df.t).alias('dt')).collect() [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))] diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py index e7ecb3b..8179784 100644 --- a/python/pyspark/sql/readwriter.py +++ b/python/pyspark/sql/readwriter.py @@ -253,7 +253,8 @@ class DataFrameReader(OptionUtils): :param recursiveFileLookup: recursively scan a directory for files. Using this option disables `partition discovery`_. - .. _partition discovery: /sql-data-sources-parquet.html#partition-discovery + .. _partition discovery: + https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html >>> df1 = spark.read.json('python/test_support/sql/people.json') @@ -490,8 +491,6 @@ class DataFrameReader(OptionUtils): :param recursiveFileLookup: recursively scan a directory for files. Using this option disables `partition discovery`_. - .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html - >>> df = spark.read.csv('python/test_support/sql/ages.csv') >>> df.dtypes [('_c0', 'string'), ('_c1', 'string')] @@ -865,8 +864,6 @@ class DataFrameWriter(OptionUtils): :param ignoreNullFields: Whether to ignore null fields when generating JSON objects. If None is set, it uses the default value, ``true``. - .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html - >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) @@ -981,8 +978,6 @@ class DataFrameWriter(OptionUtils): :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. Maximum length is 1 character. - .. 
_datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html - >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py index a831678..a5e8646 100644 --- a/python/pyspark/sql/streaming.py +++ b/python/pyspark/sql/streaming.py @@ -489,7 +489,8 @@ class DataStreamReader(OptionUtils): :param recursiveFileLookup: recursively scan a directory for files. Using this option disables `partition discovery`_. - .. _partition discovery: /sql-data-sources-parquet.html#partition-discovery + .. _partition discovery: + https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) @@ -725,8 +726,6 @@ class DataStreamReader(OptionUtils): :param recursiveFileLookup: recursively scan a directory for files. Using this option disables `partition discovery`_. - .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html - >>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org