Repository: spark Updated Branches: refs/heads/master b660de7a9 -> 050616b40
[SPARK-4578] fix asDict() with nested Row() The Row object is created on the fly once the field is accessed, so we should access them by getattr() in asDict() Author: Davies Liu <dav...@databricks.com> Closes #3434 from davies/fix_asDict and squashes the following commits: b20f1e7 [Davies Liu] fix asDict() with nested Row() Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/050616b4 Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/050616b4 Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/050616b4 Branch: refs/heads/master Commit: 050616b408c60eae02256913ceb645912dbff62e Parents: b660de7 Author: Davies Liu <dav...@databricks.com> Authored: Mon Nov 24 16:41:23 2014 -0800 Committer: Patrick Wendell <pwend...@gmail.com> Committed: Mon Nov 24 16:41:23 2014 -0800 ---------------------------------------------------------------------- python/pyspark/sql.py | 2 +- python/pyspark/tests.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/spark/blob/050616b4/python/pyspark/sql.py ---------------------------------------------------------------------- diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py index abb284d..ae28847 100644 --- a/python/pyspark/sql.py +++ b/python/pyspark/sql.py @@ -1178,7 +1178,7 @@ def _create_cls(dataType): def asDict(self): """ Return as a dict """ - return dict(zip(self.__FIELDS__, self)) + return dict((n, getattr(self, n)) for n in self.__FIELDS__) def __repr__(self): # call collect __repr__ for nested objects http://git-wip-us.apache.org/repos/asf/spark/blob/050616b4/python/pyspark/tests.py ---------------------------------------------------------------------- diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py index a01bd8d..29bcd38 100644 --- a/python/pyspark/tests.py +++ b/python/pyspark/tests.py @@ -803,7 +803,7 @@ class 
SQLTests(ReusedPySparkTestCase): @classmethod def tearDownClass(cls): ReusedPySparkTestCase.tearDownClass() - shutil.rmtree(cls.tempdir.name) + shutil.rmtree(cls.tempdir.name, ignore_errors=True) def setUp(self): self.sqlCtx = SQLContext(self.sc) @@ -930,8 +930,9 @@ class SQLTests(ReusedPySparkTestCase): rdd = self.sc.parallelize([row]) srdd = self.sqlCtx.inferSchema(rdd) srdd.registerTempTable("test") - row = self.sqlCtx.sql("select l[0].a AS la from test").first() - self.assertEqual(1, row.asDict()["la"]) + row = self.sqlCtx.sql("select l, d from test").first() + self.assertEqual(1, row.asDict()["l"][0].a) + self.assertEqual(1.0, row.asDict()['d']['key'].c) def test_infer_schema_with_udt(self): from pyspark.tests import ExamplePoint, ExamplePointUDT --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org