zeppelin-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mina...@apache.org
Subject zeppelin git commit: [ZEPPELIN-1509] Run certain SparkInterpreter tests only for Spark >= 1.3
Date Fri, 07 Oct 2016 01:37:50 GMT
Repository: zeppelin
Updated Branches:
  refs/heads/master 2771ba0b3 -> 1da262829


[ZEPPELIN-1509] Run certain SparkInterpreter tests only for Spark >= 1.3

### What is this PR for?
There are several test cases that are applicable only for Spark 1.3 and later (for example, `testCreateDataFrame`).
This PR fixes CI failures that occurred in #1475. #1475 should be merged after this one.

### What type of PR is it?
CI Fix

### Questions:
* Do the license files need an update? no
* Are there breaking changes for older versions? no
* Does this need documentation? no

Author: Mina Lee <minalee@apache.org>

Closes #1483 from minahlee/ZEPPELIN-1509_2 and squashes the following commits:

5a7ebb2 [Mina Lee] Run certain SparkInterpreter tests only when the version is greater than
or equal to 1.3


Project: http://git-wip-us.apache.org/repos/asf/zeppelin/repo
Commit: http://git-wip-us.apache.org/repos/asf/zeppelin/commit/1da26282
Tree: http://git-wip-us.apache.org/repos/asf/zeppelin/tree/1da26282
Diff: http://git-wip-us.apache.org/repos/asf/zeppelin/diff/1da26282

Branch: refs/heads/master
Commit: 1da262829a2ccb383e11713468f36e9214801ca0
Parents: 2771ba0
Author: Mina Lee <minalee@apache.org>
Authored: Wed Oct 5 00:03:20 2016 +0900
Committer: Mina Lee <minalee@apache.org>
Committed: Fri Oct 7 10:37:45 2016 +0900

----------------------------------------------------------------------
 .../zeppelin/spark/SparkInterpreterTest.java    | 84 ++++++++++++--------
 1 file changed, 49 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/zeppelin/blob/1da26282/spark/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
----------------------------------------------------------------------
diff --git a/spark/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java b/spark/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
index 1c7979f..ff26e6a 100644
--- a/spark/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
+++ b/spark/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
@@ -178,20 +178,29 @@ public class SparkInterpreterTest {
 
   @Test
   public void testCreateDataFrame() {
-    repl.interpret("case class Person(name:String, age:Int)\n", context);
-    repl.interpret("val people = sc.parallelize(Seq(Person(\"moon\", 33), Person(\"jobs\",
51), Person(\"gates\", 51), Person(\"park\", 34)))\n", context);
-    repl.interpret("people.toDF.count", context);
-    assertEquals(new Long(4), context.getResourcePool().get(
-        context.getNoteId(),
-        context.getParagraphId(),
-        WellKnownResourceName.ZeppelinReplResult.toString()).get());
+    if (getSparkVersionNumber() >= 13) {
+      repl.interpret("case class Person(name:String, age:Int)\n", context);
+      repl.interpret("val people = sc.parallelize(Seq(Person(\"moon\", 33), Person(\"jobs\",
51), Person(\"gates\", 51), Person(\"park\", 34)))\n", context);
+      repl.interpret("people.toDF.count", context);
+      assertEquals(new Long(4), context.getResourcePool().get(
+          context.getNoteId(),
+          context.getParagraphId(),
+          WellKnownResourceName.ZeppelinReplResult.toString()).get());
+    }
   }
 
   @Test
   public void testZShow() {
+    String code = "";
     repl.interpret("case class Person(name:String, age:Int)\n", context);
     repl.interpret("val people = sc.parallelize(Seq(Person(\"moon\", 33), Person(\"jobs\",
51), Person(\"gates\", 51), Person(\"park\", 34)))\n", context);
-    assertEquals(Code.SUCCESS, repl.interpret("z.show(people.toDF)", context).code());
+    if (getSparkVersionNumber() < 13) {
+      repl.interpret("people.registerTempTable(\"people\")", context);
+      code = "z.show(sqlc.sql(\"select * from people\"))";
+    } else {
+      code = "z.show(people.toDF)";
+    }
+      assertEquals(Code.SUCCESS, repl.interpret(code, context).code());
   }
 
   @Test
@@ -203,14 +212,15 @@ public class SparkInterpreterTest {
 
     if (getSparkVersionNumber() <= 11) { // spark 1.2 or later does not allow create multiple
SparkContext in the same jvm by default.
       // create new interpreter
-      Properties p = new Properties();
-      SparkInterpreter repl2 = new SparkInterpreter(p);
+      SparkInterpreter repl2 = new SparkInterpreter(getSparkTestProperties());
+      repl2.setInterpreterGroup(intpGroup);
+      intpGroup.get("note").add(repl2);
       repl2.open();
 
-      repl.interpret("case class Man(name:String, age:Int)", context);
-      repl.interpret("val man = sc.parallelize(Seq(Man(\"moon\", 33), Man(\"jobs\", 51),
Man(\"gates\", 51), Man(\"park\", 34)))", context);
-      assertEquals(Code.SUCCESS, repl.interpret("man.take(3)", context).code());
-      repl2.getSparkContext().stop();
+      repl2.interpret("case class Man(name:String, age:Int)", context);
+      repl2.interpret("val man = sc.parallelize(Seq(Man(\"moon\", 33), Man(\"jobs\", 51),
Man(\"gates\", 51), Man(\"park\", 34)))", context);
+      assertEquals(Code.SUCCESS, repl2.interpret("man.take(3)", context).code());
+      repl2.close();
     }
   }
 
@@ -253,33 +263,37 @@ public class SparkInterpreterTest {
 
   @Test
   public void testEnableImplicitImport() {
-    // Set option of importing implicits to "true", and initialize new Spark repl
-    Properties p = getSparkTestProperties();
-    p.setProperty("zeppelin.spark.importImplicit", "true");
-    SparkInterpreter repl2 = new SparkInterpreter(p);
-    repl2.setInterpreterGroup(intpGroup);
-    intpGroup.get("note").add(repl2);
+    if (getSparkVersionNumber() >= 13) {
+      // Set option of importing implicits to "true", and initialize new Spark repl
+      Properties p = getSparkTestProperties();
+      p.setProperty("zeppelin.spark.importImplicit", "true");
+      SparkInterpreter repl2 = new SparkInterpreter(p);
+      repl2.setInterpreterGroup(intpGroup);
+      intpGroup.get("note").add(repl2);
 
-    repl2.open();
-    String ddl = "val df = Seq((1, true), (2, false)).toDF(\"num\", \"bool\")";
-    assertEquals(Code.SUCCESS, repl2.interpret(ddl, context).code());
-    repl2.close();
+      repl2.open();
+      String ddl = "val df = Seq((1, true), (2, false)).toDF(\"num\", \"bool\")";
+      assertEquals(Code.SUCCESS, repl2.interpret(ddl, context).code());
+      repl2.close();
+    }
   }
 
   @Test
   public void testDisableImplicitImport() {
-    // Set option of importing implicits to "false", and initialize new Spark repl
-    // this test should return error status when creating DataFrame from sequence
-    Properties p = getSparkTestProperties();
-    p.setProperty("zeppelin.spark.importImplicit", "false");
-    SparkInterpreter repl2 = new SparkInterpreter(p);
-    repl2.setInterpreterGroup(intpGroup);
-    intpGroup.get("note").add(repl2);
+    if (getSparkVersionNumber() >= 13) {
+      // Set option of importing implicits to "false", and initialize new Spark repl
+      // this test should return error status when creating DataFrame from sequence
+      Properties p = getSparkTestProperties();
+      p.setProperty("zeppelin.spark.importImplicit", "false");
+      SparkInterpreter repl2 = new SparkInterpreter(p);
+      repl2.setInterpreterGroup(intpGroup);
+      intpGroup.get("note").add(repl2);
 
-    repl2.open();
-    String ddl = "val df = Seq((1, true), (2, false)).toDF(\"num\", \"bool\")";
-    assertEquals(Code.ERROR, repl2.interpret(ddl, context).code());
-    repl2.close();
+      repl2.open();
+      String ddl = "val df = Seq((1, true), (2, false)).toDF(\"num\", \"bool\")";
+      assertEquals(Code.ERROR, repl2.interpret(ddl, context).code());
+      repl2.close();
+    }
   }
 
   @Test


Mime
View raw message