This is an automated email from the ASF dual-hosted git repository.

zjffdu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/zeppelin.git


The following commit(s) were added to refs/heads/master by this push:
     new e908e4a  Add spark csv and json tests
e908e4a is described below

commit e908e4ae9b94f765be81f2fd8769e6e0271f202e
Author: Maziyar Panahi <maziyar.pan...@iscpif.fr>
AuthorDate: Wed Feb 6 11:29:06 2019 +0100

    Add spark csv and json tests
    
    ### What is this PR for?
    As it was discussed here: 
https://github.com/apache/zeppelin/pull/3290#issuecomment-457206170
    These two tests will make sure Apache Spark in Zeppelin can read both `csv` 
and `json` files.
    
    ### What type of PR is it?
    [Improvement]
    
    ### Todos
    * [ ] - Task
    
    ### What is the Jira issue?
    * Open an issue on Jira https://issues.apache.org/jira/browse/ZEPPELIN/
    * Put link here, and add [ZEPPELIN-*Jira number*] in the PR title, e.g. 
[ZEPPELIN-533]
    https://github.com/apache/zeppelin/pull/3290
    
    ### How should this be tested?
    * First time? Setup Travis CI as described on 
https://zeppelin.apache.org/contribution/contributions.html#continuous-integration
    * Strongly recommended: add automated unit tests for any new or changed 
behavior
    * Outline any manual steps to test the PR here.
    
    ### Screenshots (if appropriate)
    
    ### Questions:
    * Do the license files need an update? Yes
    * Are there breaking changes for older versions? No
    * Does this need documentation? No
    
    Author: Maziyar Panahi <maziyar.pan...@iscpif.fr>
    
    Closes #3304 from maziyarpanahi/master and squashes the following commits:
    
    2f7403509 [Maziyar Panahi] Add sparkReadJSON test
    fd71a0ef5 [Maziyar Panahi] Add spark csv and json tests
---
 .../integration/ZeppelinSparkClusterTest.java      | 48 ++++++++++++++++------
 1 file changed, 36 insertions(+), 12 deletions(-)

diff --git 
a/zeppelin-interpreter-integration/src/main/test/org/apache/zeppelin/integration/ZeppelinSparkClusterTest.java
 
b/zeppelin-interpreter-integration/src/main/test/org/apache/zeppelin/integration/ZeppelinSparkClusterTest.java
index 50779c9..b565f99 100644
--- 
a/zeppelin-interpreter-integration/src/main/test/org/apache/zeppelin/integration/ZeppelinSparkClusterTest.java
+++ 
b/zeppelin-interpreter-integration/src/main/test/org/apache/zeppelin/integration/ZeppelinSparkClusterTest.java
@@ -213,19 +213,43 @@ public class ZeppelinSparkClusterTest extends 
AbstractTestRestApi {
     TestUtils.getInstance(Notebook.class).removeNote(note.getId(), anonymous);
   }
 
-  @Test
-  public void sparkSQLTest() throws IOException {
-    Note note = TestUtils.getInstance(Notebook.class).createNote("note1", 
anonymous);
-    // test basic dataframe api
-    Paragraph p = note.addNewParagraph(anonymous);
-    p.setText("%spark val 
df=sqlContext.createDataFrame(Seq((\"hello\",20)))\n" +
-        "df.collect()");
-    note.run(p.getId(), true);
-    assertEquals(Status.FINISHED, p.getStatus());
-    assertTrue(p.getReturn().message().get(0).getData().contains(
-        "Array[org.apache.spark.sql.Row] = Array([hello,20])"));
+    @Test
+    public void sparkReadJSONTest() throws IOException {
+      Note note = ZeppelinServer.notebook.createNote(anonymous);
+      Paragraph p = note.addNewParagraph(anonymous);
+      p.setText("%spark val jsonStr = \"\"\"{ \"metadata\": { \"key\": 84896, 
\"value\": 54 }}\"\"\"\n" +
+              "spark.read.json(Seq(jsonStr).toDS)");
+      note.run(p.getId(), true);
+      assertEquals(Status.FINISHED, p.getStatus());
+      assertTrue(p.getResult().message().get(0).getData().contains(
+              "org.apache.spark.sql.DataFrame = [metadata: struct<key: bigint, 
value: bigint>]\n"));
+    }
+
+    @Test
+    public void sparkReadCSVTest() throws IOException {
+      Note note = ZeppelinServer.notebook.createNote(anonymous);
+      Paragraph p = note.addNewParagraph(anonymous);
+      p.setText("%spark val csvStr = \"\"\"84896,54\"\"\"\n" +
+              "spark.read.csv(Seq(csvStr).toDS)");
+      note.run(p.getId(), true);
+      assertEquals(Status.FINISHED, p.getStatus());
+      assertTrue(p.getResult().message().get(0).getData().contains(
+              "org.apache.spark.sql.DataFrame = [_c0: string, _c1: 
string]\n"));
+    }
+
+    @Test
+    public void sparkSQLTest() throws IOException {
+      Note note = ZeppelinServer.notebook.createNote(anonymous);
+      // test basic dataframe api
+      Paragraph p = note.addNewParagraph(anonymous);
+      p.setText("%spark val 
df=sqlContext.createDataFrame(Seq((\"hello\",20)))\n" +
+              "df.collect()");
+      note.run(p.getId(), true);
+      assertEquals(Status.FINISHED, p.getStatus());
+      assertTrue(p.getResult().message().get(0).getData().contains(
+              "Array[org.apache.spark.sql.Row] = Array([hello,20])"));
 
-    // test display DataFrame
+      // test display DataFrame
     p = note.addNewParagraph(anonymous);
     p.setText("%spark val 
df=sqlContext.createDataFrame(Seq((\"hello\",20)))\n" +
         "z.show(df)");

Reply via email to