Repository: spark
Updated Branches:
  refs/heads/branch-1.6 d40bf9ad8 -> 7e7f2627f


[SPARK-11980][SPARK-10621][SQL] Fix json_tuple and add test cases for

Added Python test cases for the functions `isnan`, `isnull`, `nanvl`, and
`json_tuple`.
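
For reference, a minimal sketch of the behavior these tests pin down
(assuming an active `sqlContext`, as in the doctests below; the data and
column names are illustrative):

    >>> from pyspark.sql.functions import isnan, isnull, nanvl
    >>> df = sqlContext.createDataFrame([(1.0, float('nan'))], ("a", "b"))
    >>> # isnan flags NaN values; nanvl falls back to the second column on NaN
    >>> df.select(isnan(df.b).alias("n"), nanvl(df.b, df.a).alias("v")).collect()
    [Row(n=True, v=1.0)]
    >>> # isnull flags SQL nulls, which are distinct from NaN
    >>> df2 = sqlContext.createDataFrame([(1, None), (None, 2)], ("a", "b"))
    >>> df2.select(isnull(df2.a).alias("r")).collect()
    [Row(r=False), Row(r=True)]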

Fixed a bug in the function `json_tuple`: the Python wrapper took the field
names as a single `fields` argument and passed it to the JVM unconverted; it
now accepts them as varargs and converts them with `_to_seq` (see the sketch
below and the diff).
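
A minimal, hedged sketch of the corrected call pattern (again assuming an
active `sqlContext`; data and column names are illustrative):

    >>> from pyspark.sql.functions import json_tuple
    >>> df = sqlContext.createDataFrame([("1", '{"f1": "v1", "f2": "v2"}')], ("key", "jstring"))
    >>> # each requested field becomes an output column named c0, c1, ...
    >>> df.select(json_tuple(df.jstring, "f1", "f2")).collect()
    [Row(c0=u'v1', c1=u'v2')]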

rxin, could you help me review my changes? Please let me know if anything is
missing.

Thank you! Have a good Thanksgiving Day!

Author: gatorsmile <gatorsm...@gmail.com>

Closes #9977 from gatorsmile/json_tuple.

(cherry picked from commit 068b6438d6886ce5b4aa698383866f466d913d66)
Signed-off-by: Reynold Xin <r...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/7e7f2627
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/7e7f2627
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/7e7f2627

Branch: refs/heads/branch-1.6
Commit: 7e7f2627f941585a6fb1e086e22d6d1d25b692ab
Parents: d40bf9a
Author: gatorsmile <gatorsm...@gmail.com>
Authored: Wed Nov 25 23:24:33 2015 -0800
Committer: Reynold Xin <r...@databricks.com>
Committed: Wed Nov 25 23:24:40 2015 -0800

----------------------------------------------------------------------
 python/pyspark/sql/functions.py | 44 ++++++++++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/7e7f2627/python/pyspark/sql/functions.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index e3786e0..9062594 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -286,14 +286,6 @@ def countDistinct(col, *cols):
     return Column(jc)
 
 
-@since(1.4)
-def monotonicallyIncreasingId():
-    """
-    .. note:: Deprecated in 1.6, use monotonically_increasing_id instead.
-    """
-    return monotonically_increasing_id()
-
-
 @since(1.6)
 def input_file_name():
     """Creates a string column for the file name of the current Spark task.
@@ -305,6 +297,10 @@ def input_file_name():
 @since(1.6)
 def isnan(col):
     """An expression that returns true iff the column is NaN.
+
+    >>> df = sqlContext.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
+    >>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
+    [Row(r1=False, r2=False), Row(r1=True, r2=True)]
     """
     sc = SparkContext._active_spark_context
     return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@@ -313,11 +309,23 @@ def isnan(col):
 @since(1.6)
 def isnull(col):
     """An expression that returns true iff the column is null.
+
+    >>> df = sqlContext.createDataFrame([(1, None), (None, 2)], ("a", "b"))
+    >>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
+    [Row(r1=False, r2=False), Row(r1=True, r2=True)]
     """
     sc = SparkContext._active_spark_context
     return Column(sc._jvm.functions.isnull(_to_java_column(col)))
 
 
+@since(1.4)
+def monotonicallyIncreasingId():
+    """
+    .. note:: Deprecated in 1.6, use monotonically_increasing_id instead.
+    """
+    return monotonically_increasing_id()
+
+
 @since(1.6)
 def monotonically_increasing_id():
     """A column that generates monotonically increasing 64-bit integers.
@@ -344,6 +352,10 @@ def nanvl(col1, col2):
     """Returns col1 if it is not NaN, or col2 if col1 is NaN.
 
     Both inputs should be floating point columns (DoubleType or FloatType).
+
+    >>> df = sqlContext.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
+    >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
+    [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
     """
     sc = SparkContext._active_spark_context
     return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@@ -1460,6 +1472,7 @@ def explode(col):
     return Column(jc)
 
 
+@ignore_unicode_prefix
 @since(1.6)
 def get_json_object(col, path):
     """
@@ -1468,22 +1481,33 @@ def get_json_object(col, path):
 
     :param col: string column in json format
     :param path: path to the json object to extract
+
+    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": 
"value12"}''')]
+    >>> df = sqlContext.createDataFrame(data, ("key", "jstring"))
+    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \
+                          get_json_object(df.jstring, '$.f2').alias("c1") 
).collect()
+    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', 
c1=None)]
     """
     sc = SparkContext._active_spark_context
     jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
     return Column(jc)
 
 
+@ignore_unicode_prefix
 @since(1.6)
-def json_tuple(col, fields):
+def json_tuple(col, *fields):
     """Creates a new row for a json column according to the given field names.
 
     :param col: string column in json format
     :param fields: list of fields to extract
 
+    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": 
"value12"}''')]
+    >>> df = sqlContext.createDataFrame(data, ("key", "jstring"))
+    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
+    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', 
c1=None)]
     """
     sc = SparkContext._active_spark_context
-    jc = sc._jvm.functions.json_tuple(_to_java_column(col), fields)
+    jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
     return Column(jc)
 
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to