spark-commits mailing list archives

From: wenc...@apache.org
Subject: [1/5] spark git commit: [SPARK-19810][BUILD][CORE] Remove support for Scala 2.10
Date: Thu, 13 Jul 2017 09:06:30 GMT
Repository: spark
Updated Branches:
  refs/heads/master e08d06b37 -> 425c4ada4
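
Nearly all of the test changes below follow one mechanical pattern: tuple arguments that previously relied on the compiler adapting the argument list (auto-tupling) are wrapped in an explicit extra pair of parentheses, which avoids the adapted-argument warning raised by newer Scala compilers. A minimal sketch of the difference, with illustrative names that are not taken from the commit:

    // Illustrative sketch, not part of the commit: why the diffs below add
    // an extra pair of parentheses around tuple arguments.
    object AutoTuplingSketch {
      // A single tuple parameter, like Predef's -> or the row(...) helper in SubquerySuite.
      def describe(pair: (Int, Double)): String = s"${pair._1} / ${pair._2}"

      def main(args: Array[String]): Unit = {
        // describe(1, 2.0) would compile only via argument adaptation (auto-tupling),
        // which newer scalac versions report as a deprecation/lint warning.
        // The explicit form below constructs the tuple itself and needs no adaptation:
        println(describe((1, 2.0)))
      }
    }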


http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 8258407..69d110e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -359,7 +359,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
 
   test("reduce") {
     val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
-    assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
+    assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == (("sum", 6)))
   }
 
   test("joinWith, flat schema") {
@@ -784,7 +784,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
   test("SPARK-14000: case class with tuple type field") {
     checkDataset(
       Seq(TupleClass((1, "a"))).toDS(),
-      TupleClass(1, "a")
+      TupleClass((1, "a"))
     )
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
index b80bd80..9e459ed 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
@@ -176,16 +176,16 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
 
   test("number format in statistics") {
     val numbers = Seq(
-      BigInt(0) -> ("0.0 B", "0"),
-      BigInt(100) -> ("100.0 B", "100"),
-      BigInt(2047) -> ("2047.0 B", "2.05E+3"),
-      BigInt(2048) -> ("2.0 KB", "2.05E+3"),
-      BigInt(3333333) -> ("3.2 MB", "3.33E+6"),
-      BigInt(4444444444L) -> ("4.1 GB", "4.44E+9"),
-      BigInt(5555555555555L) -> ("5.1 TB", "5.56E+12"),
-      BigInt(6666666666666666L) -> ("5.9 PB", "6.67E+15"),
-      BigInt(1L << 10 ) * (1L << 60) -> ("1024.0 EB", "1.18E+21"),
-      BigInt(1L << 11) * (1L << 60) -> ("2.36E+21 B", "2.36E+21")
+      BigInt(0) -> (("0.0 B", "0")),
+      BigInt(100) -> (("100.0 B", "100")),
+      BigInt(2047) -> (("2047.0 B", "2.05E+3")),
+      BigInt(2048) -> (("2.0 KB", "2.05E+3")),
+      BigInt(3333333) -> (("3.2 MB", "3.33E+6")),
+      BigInt(4444444444L) -> (("4.1 GB", "4.44E+9")),
+      BigInt(5555555555555L) -> (("5.1 TB", "5.56E+12")),
+      BigInt(6666666666666666L) -> (("5.9 PB", "6.67E+15")),
+      BigInt(1L << 10 ) * (1L << 60) -> (("1024.0 EB", "1.18E+21")),
+      BigInt(1L << 11) * (1L << 60) -> (("2.36E+21 B", "2.36E+21"))
     )
     numbers.foreach { case (input, (expectedSize, expectedRows)) =>
       val stats = Statistics(sizeInBytes = input, rowCount = Some(input))

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
index 7bcb419..274694b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
@@ -27,23 +27,23 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
   val row = identity[(java.lang.Integer, java.lang.Double)](_)
 
   lazy val l = Seq(
-    row(1, 2.0),
-    row(1, 2.0),
-    row(2, 1.0),
-    row(2, 1.0),
-    row(3, 3.0),
-    row(null, null),
-    row(null, 5.0),
-    row(6, null)).toDF("a", "b")
+    row((1, 2.0)),
+    row((1, 2.0)),
+    row((2, 1.0)),
+    row((2, 1.0)),
+    row((3, 3.0)),
+    row((null, null)),
+    row((null, 5.0)),
+    row((6, null))).toDF("a", "b")
 
   lazy val r = Seq(
-    row(2, 3.0),
-    row(2, 3.0),
-    row(3, 2.0),
-    row(4, 1.0),
-    row(null, null),
-    row(null, 5.0),
-    row(6, null)).toDF("c", "d")
+    row((2, 3.0)),
+    row((2, 3.0)),
+    row((3, 2.0)),
+    row((4, 1.0)),
+    row((null, null)),
+    row((null, 5.0)),
+    row((6, null))).toDF("c", "d")
 
   lazy val t = r.filter($"c".isNotNull && $"d".isNotNull)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index 94a2f9a..d76990b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -211,7 +211,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
   }
 
   testStandardAndLegacyModes("nested map with struct as value type") {
-    val data = (1 to 4).map(i => Tuple1(Map(i -> (i, s"val_$i"))))
+    val data = (1 to 4).map(i => Tuple1(Map(i -> ((i, s"val_$i")))))
     withParquetDataFrame(data) { df =>
       checkAnswer(df, data.map { case Tuple1(m) =>
         Row(m.mapValues(struct => Row(struct.productIterator.toSeq: _*)))

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
index 2911cbb..fd79323 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
@@ -146,8 +146,8 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // PhysicalRDD(nodeId = 1) -> Filter(nodeId = 0)
     val df = person.filter('age < 25)
     testSparkPlanMetrics(df, 1, Map(
-      0L -> ("Filter", Map(
-        "number of output rows" -> 1L)))
+      0L -> (("Filter", Map(
+        "number of output rows" -> 1L))))
     )
   }
 
@@ -170,8 +170,8 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
       Map("number of output rows" -> 1L,
         "avg hash probe (min, med, max)" -> "\n(1, 1, 1)"))
     testSparkPlanMetrics(df, 1, Map(
-      2L -> ("HashAggregate", expected1(0)),
-      0L -> ("HashAggregate", expected1(1)))
+      2L -> (("HashAggregate", expected1(0))),
+      0L -> (("HashAggregate", expected1(1))))
     )
 
     // 2 partitions and each partition contains 2 keys
@@ -182,8 +182,8 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
       Map("number of output rows" -> 3L,
         "avg hash probe (min, med, max)" -> "\n(1, 1, 1)"))
     testSparkPlanMetrics(df2, 1, Map(
-      2L -> ("HashAggregate", expected2(0)),
-      0L -> ("HashAggregate", expected2(1)))
+      2L -> (("HashAggregate", expected2(0))),
+      0L -> (("HashAggregate", expected2(1))))
     )
   }
 
@@ -234,15 +234,15 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // -> ObjectHashAggregate(nodeId = 0)
     val df = testData2.groupBy().agg(collect_set('a)) // 2 partitions
     testSparkPlanMetrics(df, 1, Map(
-      2L -> ("ObjectHashAggregate", Map("number of output rows" -> 2L)),
-      0L -> ("ObjectHashAggregate", Map("number of output rows" -> 1L)))
+      2L -> (("ObjectHashAggregate", Map("number of output rows" -> 2L))),
+      0L -> (("ObjectHashAggregate", Map("number of output rows" -> 1L))))
     )
 
     // 2 partitions and each partition contains 2 keys
     val df2 = testData2.groupBy('a).agg(collect_set('a))
     testSparkPlanMetrics(df2, 1, Map(
-      2L -> ("ObjectHashAggregate", Map("number of output rows" -> 4L)),
-      0L -> ("ObjectHashAggregate", Map("number of output rows" -> 3L)))
+      2L -> (("ObjectHashAggregate", Map("number of output rows" -> 4L))),
+      0L -> (("ObjectHashAggregate", Map("number of output rows" -> 3L))))
     )
   }
 
@@ -264,9 +264,9 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
       val df = spark.sql(
         "SELECT * FROM testData2 JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
       testSparkPlanMetrics(df, 1, Map(
-        0L -> ("SortMergeJoin", Map(
+        0L -> (("SortMergeJoin", Map(
          // It's 4 because we only read 3 rows in the first partition and 1 row in the second one
-          "number of output rows" -> 4L)))
+          "number of output rows" -> 4L))))
       )
     }
   }
@@ -282,17 +282,17 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
       val df = spark.sql(
         "SELECT * FROM testData2 left JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
       testSparkPlanMetrics(df, 1, Map(
-        0L -> ("SortMergeJoin", Map(
+        0L -> (("SortMergeJoin", Map(
          // It's 4 because we only read 3 rows in the first partition and 1 row in the second one
-          "number of output rows" -> 8L)))
+          "number of output rows" -> 8L))))
       )
 
       val df2 = spark.sql(
         "SELECT * FROM testDataForJoin right JOIN testData2 ON testData2.a = testDataForJoin.a")
       testSparkPlanMetrics(df2, 1, Map(
-        0L -> ("SortMergeJoin", Map(
+        0L -> (("SortMergeJoin", Map(
          // It's 4 because we only read 3 rows in the first partition and 1 row in the second one
-          "number of output rows" -> 8L)))
+          "number of output rows" -> 8L))))
       )
     }
   }
@@ -304,9 +304,9 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // ... -> BroadcastHashJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
     val df = df1.join(broadcast(df2), "key")
     testSparkPlanMetrics(df, 2, Map(
-      1L -> ("BroadcastHashJoin", Map(
+      1L -> (("BroadcastHashJoin", Map(
         "number of output rows" -> 2L,
-        "avg hash probe (min, med, max)" -> "\n(1, 1, 1)")))
+        "avg hash probe (min, med, max)" -> "\n(1, 1, 1)"))))
     )
   }
 
@@ -365,9 +365,9 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
       val df = df1.join(df2, "key")
       val metrics = getSparkPlanMetrics(df, 1, Set(1L))
       testSparkPlanMetrics(df, 1, Map(
-        1L -> ("ShuffledHashJoin", Map(
+        1L -> (("ShuffledHashJoin", Map(
           "number of output rows" -> 2L,
-          "avg hash probe (min, med, max)" -> "\n(1, 1, 1)")))
+          "avg hash probe (min, med, max)" -> "\n(1, 1, 1)"))))
       )
     }
   }
@@ -426,14 +426,14 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // ... -> BroadcastHashJoin(nodeId = 0)
     val df = df1.join(broadcast(df2), $"key" === $"key2", "left_outer")
     testSparkPlanMetrics(df, 2, Map(
-      0L -> ("BroadcastHashJoin", Map(
-        "number of output rows" -> 5L)))
+      0L -> (("BroadcastHashJoin", Map(
+        "number of output rows" -> 5L))))
     )
 
     val df3 = df1.join(broadcast(df2), $"key" === $"key2", "right_outer")
     testSparkPlanMetrics(df3, 2, Map(
-      0L -> ("BroadcastHashJoin", Map(
-        "number of output rows" -> 6L)))
+      0L -> (("BroadcastHashJoin", Map(
+        "number of output rows" -> 6L))))
     )
   }
 
@@ -448,8 +448,8 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
           "SELECT * FROM testData2 left JOIN testDataForJoin ON " +
             "testData2.a * testDataForJoin.a != testData2.a + testDataForJoin.a")
         testSparkPlanMetrics(df, 3, Map(
-          1L -> ("BroadcastNestedLoopJoin", Map(
-            "number of output rows" -> 12L)))
+          1L -> (("BroadcastNestedLoopJoin", Map(
+            "number of output rows" -> 12L))))
         )
       }
     }
@@ -462,8 +462,8 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // ... -> BroadcastHashJoin(nodeId = 0)
     val df = df1.join(broadcast(df2), $"key" === $"key2", "leftsemi")
     testSparkPlanMetrics(df, 2, Map(
-      0L -> ("BroadcastHashJoin", Map(
-        "number of output rows" -> 2L)))
+      0L -> (("BroadcastHashJoin", Map(
+        "number of output rows" -> 2L))))
     )
   }
 
@@ -477,7 +477,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
         val df = spark.sql(
           "SELECT * FROM testData2 JOIN testDataForJoin")
         testSparkPlanMetrics(df, 1, Map(
-          0L -> ("CartesianProduct", Map("number of output rows" -> 12L)))
+          0L -> (("CartesianProduct", Map("number of output rows" -> 12L))))
         )
       }
     }
@@ -490,7 +490,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
       val df = spark.sql(
         "SELECT * FROM testData2 ANTI JOIN antiData ON testData2.a = antiData.a")
       testSparkPlanMetrics(df, 1, Map(
-        0L -> ("SortMergeJoin", Map("number of output rows" -> 4L)))
+        0L -> (("SortMergeJoin", Map("number of output rows" -> 4L))))
       )
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
index 82eff5e..2c6763e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
@@ -395,7 +395,7 @@ class SQLListenerSuite extends SparkFunSuite with SharedSQLContext with JsonTest
     }
     // Listener tracks only SQL metrics, not other accumulators
     assert(trackedAccums.size === 1)
-    assert(trackedAccums.head === (sqlMetricInfo.id, sqlMetricInfo.update.get))
+    assert(trackedAccums.head === ((sqlMetricInfo.id, sqlMetricInfo.update.get)))
   }
 
   test("driver side SQL metrics") {

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala
index d826d3f..f65dcdf 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala
@@ -27,7 +27,7 @@ class ReduceAggregatorSuite extends SparkFunSuite {
     val encoder: ExpressionEncoder[Int] = ExpressionEncoder()
     val func = (v1: Int, v2: Int) => v1 + v2
     val aggregator: ReduceAggregator[Int] = new ReduceAggregator(func)(Encoders.scalaInt)
-    assert(aggregator.zero == (false, null))
+    assert(aggregator.zero == (false, null).asInstanceOf[(Boolean, Int)])
   }
 
   test("reduce, merge and finish") {
@@ -36,22 +36,22 @@ class ReduceAggregatorSuite extends SparkFunSuite {
     val aggregator: ReduceAggregator[Int] = new ReduceAggregator(func)(Encoders.scalaInt)
 
     val firstReduce = aggregator.reduce(aggregator.zero, 1)
-    assert(firstReduce == (true, 1))
+    assert(firstReduce == ((true, 1)))
 
     val secondReduce = aggregator.reduce(firstReduce, 2)
-    assert(secondReduce == (true, 3))
+    assert(secondReduce == ((true, 3)))
 
     val thirdReduce = aggregator.reduce(secondReduce, 3)
-    assert(thirdReduce == (true, 6))
+    assert(thirdReduce == ((true, 6)))
 
     val mergeWithZero1 = aggregator.merge(aggregator.zero, firstReduce)
-    assert(mergeWithZero1 == (true, 1))
+    assert(mergeWithZero1 == ((true, 1)))
 
     val mergeWithZero2 = aggregator.merge(secondReduce, aggregator.zero)
-    assert(mergeWithZero2 == (true, 3))
+    assert(mergeWithZero2 == ((true, 3)))
 
     val mergeTwoReduced = aggregator.merge(firstReduce, secondReduce)
-    assert(mergeTwoReduced == (true, 4))
+    assert(mergeTwoReduced == ((true, 4)))
 
     assert(aggregator.finish(firstReduce)== 1)
     assert(aggregator.finish(secondReduce) == 3)

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
index 59c6a6f..d21d1f1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
@@ -20,12 +20,10 @@ package org.apache.spark.sql.streaming
 import java.util.UUID
 
 import scala.collection.mutable
-import scala.concurrent.duration._
 import scala.language.reflectiveCalls
 
 import org.scalactic.TolerantNumerics
 import org.scalatest.concurrent.AsyncAssertions.Waiter
-import org.scalatest.concurrent.Eventually._
 import org.scalatest.concurrent.PatienceConfiguration.Timeout
 import org.scalatest.BeforeAndAfter
 import org.scalatest.PrivateMethodTester._

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
index 3de60c7..a29d7a7 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
@@ -414,7 +414,7 @@ private[spark] object HiveUtils extends Logging {
   protected[sql] def toHiveString(a: (Any, DataType)): String = a match {
     case (struct: Row, StructType(fields)) =>
       struct.toSeq.zip(fields).map {
-        case (v, t) => s""""${t.name}":${toHiveStructString(v, t.dataType)}"""
+        case (v, t) => s""""${t.name}":${toHiveStructString((v, t.dataType))}"""
       }.mkString("{", ",", "}")
     case (seq: Seq[_], ArrayType(typ, _)) =>
       seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]")
@@ -437,7 +437,7 @@ private[spark] object HiveUtils extends Logging {
   protected def toHiveStructString(a: (Any, DataType)): String = a match {
     case (struct: Row, StructType(fields)) =>
       struct.toSeq.zip(fields).map {
-        case (v, t) => s""""${t.name}":${toHiveStructString(v, t.dataType)}"""
+        case (v, t) => s""""${t.name}":${toHiveStructString((v, t.dataType))}"""
       }.mkString("{", ",", "}")
     case (seq: Seq[_], ArrayType(typ, _)) =>
       seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]")

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index d8fd68b..8140f88 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -80,17 +80,17 @@ class DataSourceWithHiveMetastoreCatalogSuite
   ).coalesce(1)
 
   Seq(
-    "parquet" -> (
+    "parquet" -> ((
       "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
       "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
       "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
-    ),
+    )),
 
-    "orc" -> (
+    "orc" -> ((
       "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
       "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
       "org.apache.hadoop.hive.ql.io.orc.OrcSerde"
-    )
+    ))
   ).foreach { case (provider, (inputFormat, outputFormat, serde)) =>
     test(s"Persist non-partitioned $provider relation into metastore as managed table") {
       withTable("t") {

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
index 5f15a70..1771546 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
@@ -151,7 +151,7 @@ class HiveSparkSubmitSuite
     // the HiveContext code mistakenly overrides the class loader that contains user classes.
     // For more detail, see sql/hive/src/test/resources/regression-test-SPARK-8489/*scala.
     val version = Properties.versionNumberString match {
-      case v if v.startsWith("2.10") || v.startsWith("2.11") => v.substring(0, 4)
+      case v if v.startsWith("2.12") || v.startsWith("2.11") => v.substring(0, 4)
       case x => throw new Exception(s"Unsupported Scala Version: $x")
     }
     val jarDir = getTestResourcePath("regression-test-SPARK-8489")

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index 12daf3a..5b62e37 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -20,6 +20,8 @@ package org.apache.spark.sql.hive.execution
 import java.io.File
 import java.net.URI
 
+import scala.language.existentials
+
 import org.apache.hadoop.fs.Path
 import org.scalatest.BeforeAndAfterEach
 

http://git-wip-us.apache.org/repos/asf/spark/blob/425c4ada/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index cf33760..799abc1 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -795,7 +795,7 @@ class HiveQuerySuite extends HiveComparisonTest with SQLTestUtils with BeforeAnd
    sql("SELECT * FROM m").collect().zip(sql("SELECT * FROM src LIMIT 10").collect()).foreach {
       case (Row(map: Map[_, _]), Row(key: Int, value: String)) =>
         assert(map.size === 1)
-        assert(map.head === (key, value))
+        assert(map.head === ((key, value)))
     }
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org

