spark-commits mailing list archives

From sro...@apache.org
Subject [3/3] spark git commit: [SPARK-13423][WIP][CORE][SQL][STREAMING] Static analysis fixes for 2.x
Date Thu, 03 Mar 2016 09:54:21 GMT
[SPARK-13423][WIP][CORE][SQL][STREAMING] Static analysis fixes for 2.x

## What changes were proposed in this pull request?

Make some cross-cutting code improvements according to static analysis. These are individually up for discussion since they exist in separate commits that can be reverted. The changes are broadly:

- Inner class should be static
- Mismatched hashCode/equals
- Overflow in compareTo
- Unchecked warnings
- Misuse of assert vs. JUnit assertions
- get(a) + getOrElse(b) -> getOrElse(a,b)
- Array/String .size -> .length (occasionally, -> .isEmpty / .nonEmpty) to avoid implicit conversions
- Dead code
- tailrec
- exists(_ ==) -> contains; find + nonEmpty -> exists; filter + size -> count
- reduce(_+_) -> sum; map + flatten -> flatMap

The most controversial change may be .size -> .length, simply because of the sheer number of occurrences; it is intended to avoid implicit conversions that might be expensive in some places. A few of these rewrites are sketched below.
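
To make the list above concrete, here is a rough Scala sketch of several of the rewrites; the values `xs`, `m`, and `arr` are placeholders for illustration only, not code from the patch:

```scala
// Illustrative only; none of these identifiers come from the patch itself.
val xs = Seq(1, 2, 3)
val m = Map("a" -> 1)
val arr = Array(1, 2, 3)

xs.contains(2)              // instead of xs.exists(_ == 2)
xs.exists(_ > 2)            // instead of xs.find(_ > 2).nonEmpty
xs.count(_ > 1)             // instead of xs.filter(_ > 1).size
xs.sum                      // instead of xs.reduce(_ + _)
xs.flatMap(x => Seq(x, x))  // instead of xs.map(x => Seq(x, x)).flatten
m.getOrElse("b", 0)         // instead of m.get("b").getOrElse(0)
arr.length                  // instead of arr.size, which goes through an implicit ArrayOps wrapper
```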

## How was this patch tested?

Existing Jenkins unit tests.

Author: Sean Owen <sowen@cloudera.com>

Closes #11292 from srowen/SPARK-13423.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e97fc7f1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e97fc7f1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e97fc7f1

Branch: refs/heads/master
Commit: e97fc7f176f8bf501c9b3afd8410014e3b0e1602
Parents: 02b7677
Author: Sean Owen <sowen@cloudera.com>
Authored: Thu Mar 3 09:54:09 2016 +0000
Committer: Sean Owen <sowen@cloudera.com>
Committed: Thu Mar 3 09:54:09 2016 +0000

----------------------------------------------------------------------
 .../network/ChunkFetchIntegrationSuite.java     |  2 +-
 .../network/RequestTimeoutIntegrationSuite.java | 12 +++----
 .../spark/network/RpcIntegrationSuite.java      |  2 +-
 .../network/TransportClientFactorySuite.java    |  7 ++--
 .../protocol/MessageWithHeaderSuite.java        |  6 ++--
 .../shuffle/protocol/mesos/RegisterDriver.java  |  8 +++++
 .../ExternalShuffleIntegrationSuite.java        |  2 +-
 .../shuffle/RetryingBlockFetcherSuite.java      |  2 +-
 .../types/UTF8StringPropertyCheckSuite.scala    |  2 +-
 .../shuffle/sort/ShuffleInMemorySorter.java     |  4 ++-
 .../unsafe/sort/UnsafeExternalSorter.java       |  2 +-
 .../scala/org/apache/spark/Dependency.scala     |  2 +-
 .../scala/org/apache/spark/Partitioner.scala    |  8 ++---
 .../scala/org/apache/spark/SparkContext.scala   |  7 ++--
 .../scala/org/apache/spark/TaskEndReason.scala  |  1 -
 .../apache/spark/deploy/ClientArguments.scala   |  2 ++
 .../org/apache/spark/deploy/SparkSubmit.scala   |  3 ++
 .../deploy/history/HistoryServerArguments.scala |  3 ++
 .../spark/deploy/master/MasterArguments.scala   |  3 ++
 .../spark/deploy/master/MasterSource.scala      |  4 +--
 .../master/ZooKeeperPersistenceEngine.scala     |  2 +-
 .../spark/deploy/master/ui/MasterPage.scala     | 10 +++---
 .../mesos/MesosClusterDispatcherArguments.scala |  3 ++
 .../deploy/rest/RestSubmissionClient.scala      |  6 ++--
 .../deploy/rest/mesos/MesosRestServer.scala     |  2 +-
 .../org/apache/spark/deploy/worker/Worker.scala |  7 ++--
 .../spark/deploy/worker/WorkerArguments.scala   |  3 ++
 .../partial/ApproximateActionListener.scala     |  2 +-
 .../apache/spark/rdd/LocalCheckpointRDD.scala   |  2 +-
 .../apache/spark/scheduler/DAGScheduler.scala   |  2 ++
 .../spark/scheduler/TaskSchedulerImpl.scala     |  2 +-
 .../apache/spark/scheduler/TaskSetManager.scala |  4 +--
 .../spark/serializer/JavaSerializer.scala       |  2 +-
 .../spark/status/api/v1/AllRDDResource.scala    |  2 +-
 .../spark/status/api/v1/OneJobResource.scala    |  2 +-
 .../org/apache/spark/storage/StorageUtils.scala |  6 ++--
 .../apache/spark/ui/ConsoleProgressBar.scala    |  2 +-
 .../org/apache/spark/ui/jobs/JobsTab.scala      |  2 +-
 .../org/apache/spark/ui/jobs/StagesTab.scala    |  2 +-
 .../ui/scope/RDDOperationGraphListener.scala    |  5 ++-
 .../apache/spark/ui/storage/StoragePage.scala   |  2 +-
 .../scala/org/apache/spark/util/Utils.scala     |  2 ++
 .../util/logging/RollingFileAppender.scala      |  2 +-
 .../spark/memory/TaskMemoryManagerSuite.java    | 36 ++++++++++----------
 .../sort/ShuffleInMemorySorterSuite.java        |  2 +-
 .../shuffle/sort/UnsafeShuffleWriterSuite.java  |  6 ++--
 .../unsafe/sort/UnsafeExternalSorterSuite.java  | 22 ++++++------
 .../unsafe/sort/UnsafeInMemorySorterSuite.java  |  3 +-
 .../org/apache/spark/AccumulatorSuite.scala     |  2 +-
 .../scala/org/apache/spark/SparkConfSuite.scala |  2 +-
 .../rdd/ParallelCollectionSplitSuite.scala      | 12 +++----
 .../scala/org/apache/spark/rdd/RDDSuite.scala   |  6 ++--
 .../apache/spark/scheduler/MapStatusSuite.scala |  2 +-
 .../spark/scheduler/TaskResultGetterSuite.scala |  2 +-
 .../BypassMergeSortShuffleWriterSuite.scala     |  2 +-
 .../apache/spark/util/SparkConfWithEnv.scala    |  4 +--
 .../examples/streaming/JavaActorWordCount.java  |  4 ++-
 .../spark/examples/DFSReadWriteTest.scala       |  4 +--
 .../spark/examples/DriverSubmissionTest.scala   |  2 +-
 .../spark/examples/MultiBroadcastTest.scala     |  2 +-
 .../org/apache/spark/examples/SparkALS.scala    |  2 +-
 .../spark/examples/mllib/LDAExample.scala       |  2 +-
 .../spark/examples/mllib/SampledRDDs.scala      |  2 +-
 .../streaming/clickstream/PageViewStream.scala  |  2 +-
 .../apache/spark/streaming/kafka/KafkaRDD.scala |  2 +-
 .../org/apache/spark/graphx/GraphOps.scala      |  2 +-
 .../org/apache/spark/graphx/VertexRDD.scala     |  6 ++--
 .../spark/graphx/impl/EdgePartition.scala       | 10 +++---
 .../graphx/impl/EdgePartitionBuilder.scala      | 16 ++++-----
 .../apache/spark/graphx/impl/EdgeRDDImpl.scala  |  2 +-
 .../apache/spark/graphx/impl/GraphImpl.scala    |  5 +--
 .../graphx/impl/RoutingTablePartition.scala     |  4 +--
 .../graphx/impl/ShippableVertexPartition.scala  |  2 +-
 .../apache/spark/graphx/lib/TriangleCount.scala |  2 +-
 .../apache/spark/graphx/VertexRDDSuite.scala    |  2 +-
 .../apache/spark/ml/feature/MinMaxScaler.scala  |  2 +-
 .../spark/ml/feature/QuantileDiscretizer.scala  |  4 +--
 .../spark/mllib/api/python/PythonMLLibAPI.scala |  4 +--
 .../impl/GLMClassificationModel.scala           |  2 +-
 .../mllib/clustering/BisectingKMeans.scala      |  2 ++
 .../spark/mllib/clustering/KMeansModel.scala    |  2 +-
 .../mllib/evaluation/MulticlassMetrics.scala    |  2 +-
 .../mllib/evaluation/MultilabelMetrics.scala    | 14 ++++----
 .../org/apache/spark/mllib/feature/IDF.scala    |  6 ++--
 .../apache/spark/mllib/feature/Normalizer.scala |  4 +--
 .../spark/mllib/feature/StandardScaler.scala    |  6 ++--
 .../apache/spark/mllib/feature/Word2Vec.scala   | 12 +++----
 .../org/apache/spark/mllib/fpm/FPGrowth.scala   |  2 +-
 .../org/apache/spark/mllib/linalg/BLAS.scala    | 10 +++---
 .../mllib/linalg/CholeskyDecomposition.scala    |  2 +-
 .../apache/spark/mllib/linalg/Matrices.scala    |  4 +--
 .../mllib/linalg/distributed/BlockMatrix.scala  |  2 +-
 .../linalg/distributed/IndexedRowMatrix.scala   |  4 +--
 .../apache/spark/mllib/recommendation/ALS.scala |  4 +--
 .../regression/GeneralizedLinearAlgorithm.scala |  2 +-
 .../regression/impl/GLMRegressionModel.scala    |  2 +-
 .../apache/spark/mllib/tree/DecisionTree.scala  |  8 +++--
 .../spark/mllib/tree/impurity/Variance.scala    |  4 +--
 .../mllib/tree/model/DecisionTreeModel.scala    |  4 +--
 .../spark/mllib/fpm/JavaFPGrowthSuite.java      |  4 ++-
 .../mllib/regression/RidgeRegressionSuite.scala |  2 +-
 .../spark/mllib/stat/StreamingTestSuite.scala   |  2 +-
 .../spark/sql/catalyst/analysis/Analyzer.scala  |  2 ++
 .../spark/sql/catalyst/expressions/Cast.scala   |  2 +-
 .../codegen/GenerateSafeProjection.scala        |  3 ++
 .../expressions/complexTypeCreator.scala        |  2 +-
 .../spark/sql/catalyst/expressions/misc.scala   |  3 ++
 .../sql/catalyst/expressions/objects.scala      |  2 ++
 .../sql/catalyst/optimizer/Optimizer.scala      |  2 ++
 .../spark/sql/catalyst/parser/ParseDriver.scala |  3 ++
 .../spark/sql/catalyst/util/DateTimeUtils.scala |  3 ++
 .../spark/sql/catalyst/util/package.scala       |  4 +--
 .../org/apache/spark/sql/types/StructType.scala |  2 +-
 .../spark/sql/RandomDataGeneratorSuite.scala    |  2 +-
 .../SubexpressionEliminationSuite.scala         | 10 +++---
 .../sql/execution/vectorized/ColumnarBatch.java |  1 -
 .../spark/sql/ContinuousQueryManager.scala      |  5 ++-
 .../org/apache/spark/sql/api/r/SQLUtils.scala   |  2 +-
 .../sql/execution/columnar/ColumnAccessor.scala |  3 ++
 .../sql/execution/columnar/ColumnType.scala     |  4 ++-
 .../columnar/InMemoryColumnarTableScan.scala    |  2 +-
 .../execution/datasources/csv/CSVRelation.scala | 10 +++---
 .../execution/datasources/jdbc/JDBCRDD.scala    |  6 ++--
 .../spark/sql/execution/ui/SQLListener.scala    |  2 +-
 .../apache/spark/sql/JavaDataFrameSuite.java    | 16 ++++-----
 .../org/apache/spark/sql/JavaDatasetSuite.java  | 21 +++++-------
 .../org/apache/spark/sql/DataFrameSuite.scala   |  2 +-
 .../org/apache/spark/sql/SQLQuerySuite.scala    | 15 ++++----
 .../apache/spark/sql/hive/HiveInspectors.scala  |  4 +--
 .../spark/sql/hive/HiveMetastoreCatalog.scala   |  5 ++-
 .../hive/execution/InsertIntoHiveTable.scala    |  2 +-
 .../apache/spark/streaming/DStreamGraph.scala   |  2 +-
 .../streaming/dstream/FileInputDStream.scala    |  2 +-
 .../dstream/ReceiverInputDStream.scala          |  4 +--
 .../dstream/ReducedWindowedDStream.scala        |  2 +-
 .../spark/streaming/dstream/UnionDStream.scala  |  6 ++--
 .../streaming/scheduler/JobGenerator.scala      |  4 +--
 .../streaming/scheduler/ReceiverTracker.scala   |  2 +-
 .../apache/spark/streaming/ui/BatchPage.scala   |  2 +-
 .../ui/StreamingJobProgressListener.scala       |  2 +-
 .../spark/streaming/ui/StreamingPage.scala      |  2 +-
 .../org/apache/spark/streaming/ui/UIUtils.scala |  2 +-
 .../spark/streaming/BasicOperationsSuite.scala  |  2 +-
 .../streaming/StreamingListenerSuite.scala      |  2 +-
 .../spark/streaming/UISeleniumSuite.scala       |  8 ++---
 .../streaming/util/WriteAheadLogSuite.scala     |  8 ++---
 .../apache/spark/deploy/yarn/ClientSuite.scala  |  2 +-
 147 files changed, 345 insertions(+), 293 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
index 70c849d..d17e986 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
@@ -132,7 +132,7 @@ public class ChunkFetchIntegrationSuite {
     testFile.delete();
   }
 
-  class FetchResult {
+  static class FetchResult {
     public Set<Integer> successChunks;
     public Set<Integer> failedChunks;
     public List<ManagedBuffer> buffers;

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-common/src/test/java/org/apache/spark/network/RequestTimeoutIntegrationSuite.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/test/java/org/apache/spark/network/RequestTimeoutIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/RequestTimeoutIntegrationSuite.java
index f9b5bf9..e2d026c 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/RequestTimeoutIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/RequestTimeoutIntegrationSuite.java
@@ -124,8 +124,8 @@ public class RequestTimeoutIntegrationSuite {
     synchronized (callback1) {
       client.sendRpc(ByteBuffer.allocate(0), callback1);
       callback1.wait(4 * 1000);
-      assert (callback1.failure != null);
-      assert (callback1.failure instanceof IOException);
+      assertNotNull(callback1.failure);
+      assertTrue(callback1.failure instanceof IOException);
     }
     semaphore.release();
   }
@@ -167,8 +167,8 @@ public class RequestTimeoutIntegrationSuite {
     synchronized (callback0) {
       client0.sendRpc(ByteBuffer.allocate(0), callback0);
       callback0.wait(FOREVER);
-      assert (callback0.failure instanceof IOException);
-      assert (!client0.isActive());
+      assertTrue(callback0.failure instanceof IOException);
+      assertFalse(client0.isActive());
     }
 
     // Increment the semaphore and the second request should succeed quickly.
@@ -236,7 +236,7 @@ public class RequestTimeoutIntegrationSuite {
 
     synchronized (callback1) {
       // failed at same time as previous
-      assert (callback0.failure instanceof IOException);
+      assertTrue(callback0.failure instanceof IOException);
     }
   }
 
@@ -244,7 +244,7 @@ public class RequestTimeoutIntegrationSuite {
    * Callback which sets 'success' or 'failure' on completion.
    * Additionally notifies all waiters on this callback when invoked.
    */
-  class TestCallback implements RpcResponseCallback, ChunkReceivedCallback {
+  static class TestCallback implements RpcResponseCallback, ChunkReceivedCallback {
 
     int successLength = -1;
     Throwable failure;

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
index 9e9be98..a7a99f3 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
@@ -91,7 +91,7 @@ public class RpcIntegrationSuite {
     clientFactory.close();
   }
 
-  class RpcResult {
+  static class RpcResult {
     public Set<String> successMessages;
     public Set<String> errorMessages;
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java b/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
index dac7d4a..9a89dd1 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.Maps;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -95,7 +96,7 @@ public class TransportClientFactorySuite {
           try {
             TransportClient client =
               factory.createClient(TestUtils.getLocalHost(), server1.getPort());
-            assert (client.isActive());
+            assertTrue(client.isActive());
             clients.add(client);
           } catch (IOException e) {
             failed.incrementAndGet();
@@ -115,8 +116,8 @@ public class TransportClientFactorySuite {
       attempts[i].join();
     }
 
-    assert(failed.get() == 0);
-    assert(clients.size() == maxConnections);
+    Assert.assertEquals(0, failed.get());
+    Assert.assertEquals(clients.size(), maxConnections);
 
     for (TransportClient client : clients) {
       client.close();

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java b/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java
index fbbe4b7..b341c56 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java
@@ -65,7 +65,7 @@ public class MessageWithHeaderSuite {
     assertEquals(42, result.readLong());
     assertEquals(84, result.readLong());
 
-    assert(msg.release());
+    assertTrue(msg.release());
     assertEquals(0, bodyPassedToNettyManagedBuffer.refCnt());
     assertEquals(0, header.refCnt());
   }
@@ -77,7 +77,7 @@ public class MessageWithHeaderSuite {
     ByteBuf body = (ByteBuf) managedBuf.convertToNetty();
     assertEquals(2, body.refCnt());
     MessageWithHeader msg = new MessageWithHeader(managedBuf, header, body, body.readableBytes());
-    assert(msg.release());
+    assertTrue(msg.release());
     Mockito.verify(managedBuf, Mockito.times(1)).release();
     assertEquals(0, body.refCnt());
   }
@@ -94,7 +94,7 @@ public class MessageWithHeaderSuite {
     for (long i = 0; i < 8; i++) {
       assertEquals(i, result.readLong());
     }
-    assert(msg.release());
+    assertTrue(msg.release());
   }
 
   private ByteBuf doWrite(MessageWithHeader msg, int minExpectedWrites) throws Exception {

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/RegisterDriver.java
----------------------------------------------------------------------
diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/RegisterDriver.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/RegisterDriver.java
index 94a61d6..eeb0019 100644
--- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/RegisterDriver.java
+++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/mesos/RegisterDriver.java
@@ -56,6 +56,14 @@ public class RegisterDriver extends BlockTransferMessage {
     return Objects.hashCode(appId);
   }
 
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof RegisterDriver)) {
+      return false;
+    }
+    return Objects.equal(appId, ((RegisterDriver) o).appId);
+  }
+
   public static RegisterDriver decode(ByteBuf buf) {
     String appId = Encoders.Strings.decode(buf);
     return new RegisterDriver(appId);
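
As background on the "Mismatched hashCode/equals" category, a minimal Scala sketch (not Spark code) of why a hashCode without a matching equals breaks hash-based lookups:

```scala
// Not Spark code: a key type with hashCode but default (reference) equals.
class Key(val id: String) {
  override def hashCode: Int = id.hashCode
  // no equals override, so two Keys with the same id are never "equal"
}

val set = scala.collection.mutable.HashSet(new Key("app-1"))
println(set.contains(new Key("app-1")))   // false: same hash bucket, but equals fails
```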

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
----------------------------------------------------------------------
diff --git a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
index 5e706bf..ecbbe7b 100644
--- a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
+++ b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
@@ -109,7 +109,7 @@ public class ExternalShuffleIntegrationSuite {
     handler.applicationRemoved(APP_ID, false /* cleanupLocalDirs */);
   }
 
-  class FetchResult {
+  static class FetchResult {
     public Set<String> successBlocks;
     public Set<String> failedBlocks;
     public List<ManagedBuffer> buffers;

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/RetryingBlockFetcherSuite.java
----------------------------------------------------------------------
diff --git a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/RetryingBlockFetcherSuite.java b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/RetryingBlockFetcherSuite.java
index 3a6ef0d..91882e3 100644
--- a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/RetryingBlockFetcherSuite.java
+++ b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/RetryingBlockFetcherSuite.java
@@ -305,7 +305,7 @@ public class RetryingBlockFetcherSuite {
       }
     }
 
-    assert stub != null;
+    assertNotNull(stub);
     stub.when(fetchStarter).createAndStart((String[]) any(), (BlockFetchingListener) anyObject());
     String[] blockIdArray = blockIds.toArray(new String[blockIds.size()]);
     new RetryingBlockFetcher(conf, fetchStarter, blockIdArray, listener).start();

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala
----------------------------------------------------------------------
diff --git a/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala b/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala
index b3bbd68..8a6b9e3 100644
--- a/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala
+++ b/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala
@@ -193,7 +193,7 @@ class UTF8StringPropertyCheckSuite extends FunSuite with GeneratorDrivenProperty
 
   test("concat") {
     def concat(orgin: Seq[String]): String =
-      if (orgin.exists(_ == null)) null else orgin.mkString
+      if (orgin.contains(null)) null else orgin.mkString
 
     forAll { (inputs: Seq[String]) =>
       assert(UTF8String.concat(inputs.map(toUTF8): _*) === toUTF8(inputs.mkString))

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java
index d74602c..2381cff 100644
--- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java
@@ -30,7 +30,9 @@ final class ShuffleInMemorySorter {
   private static final class SortComparator implements Comparator<PackedRecordPointer> {
     @Override
     public int compare(PackedRecordPointer left, PackedRecordPointer right) {
-      return left.getPartitionId() - right.getPartitionId();
+      int leftId = left.getPartitionId();
+      int rightId = right.getPartitionId();
+      return leftId < rightId ? -1 : (leftId > rightId ? 1 : 0);
     }
   }
   private static final SortComparator SORT_COMPARATOR = new SortComparator();
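
As background on the SortComparator change above, a minimal Scala sketch (not Spark code) of why subtraction-based comparators can overflow, and the non-overflowing forms:

```scala
// Not Spark code: demonstrates the general hazard of subtraction-based comparators.
val left = Int.MinValue
val right = 1
// Int subtraction wraps around, so the "negative means less-than" assumption breaks:
println(left - right)                            // 2147483647, i.e. looks "greater than"
// Safe comparisons that cannot overflow:
println(java.lang.Integer.compare(left, right))  // -1
println(if (left < right) -1 else if (left > right) 1 else 0)  // -1, as in the patch
```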

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
index 296bf72..9236bd2 100644
--- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
@@ -550,7 +550,7 @@ public final class UnsafeExternalSorter extends MemoryConsumer {
   /**
    * Chain multiple UnsafeSorterIterator together as single one.
    */
-  class ChainedIterator extends UnsafeSorterIterator {
+  static class ChainedIterator extends UnsafeSorterIterator {
 
     private final Queue<UnsafeSorterIterator> iterators;
     private UnsafeSorterIterator current;

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/Dependency.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/Dependency.scala b/core/src/main/scala/org/apache/spark/Dependency.scala
index 9aafc9e..b65cfdc 100644
--- a/core/src/main/scala/org/apache/spark/Dependency.scala
+++ b/core/src/main/scala/org/apache/spark/Dependency.scala
@@ -88,7 +88,7 @@ class ShuffleDependency[K: ClassTag, V: ClassTag, C: ClassTag](
   val shuffleId: Int = _rdd.context.newShuffleId()
 
   val shuffleHandle: ShuffleHandle = _rdd.context.env.shuffleManager.registerShuffle(
-    shuffleId, _rdd.partitions.size, this)
+    shuffleId, _rdd.partitions.length, this)
 
   _rdd.sparkContext.cleaner.foreach(_.registerShuffleForCleanup(this))
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/Partitioner.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/Partitioner.scala b/core/src/main/scala/org/apache/spark/Partitioner.scala
index a7c2790..976c19f 100644
--- a/core/src/main/scala/org/apache/spark/Partitioner.scala
+++ b/core/src/main/scala/org/apache/spark/Partitioner.scala
@@ -55,14 +55,14 @@ object Partitioner {
    * We use two method parameters (rdd, others) to enforce callers passing at least 1 RDD.
    */
   def defaultPartitioner(rdd: RDD[_], others: RDD[_]*): Partitioner = {
-    val bySize = (Seq(rdd) ++ others).sortBy(_.partitions.size).reverse
+    val bySize = (Seq(rdd) ++ others).sortBy(_.partitions.length).reverse
     for (r <- bySize if r.partitioner.isDefined && r.partitioner.get.numPartitions > 0) {
       return r.partitioner.get
     }
     if (rdd.context.conf.contains("spark.default.parallelism")) {
       new HashPartitioner(rdd.context.defaultParallelism)
     } else {
-      new HashPartitioner(bySize.head.partitions.size)
+      new HashPartitioner(bySize.head.partitions.length)
     }
   }
 }
@@ -122,7 +122,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
       // This is the sample size we need to have roughly balanced output partitions, capped at 1M.
       val sampleSize = math.min(20.0 * partitions, 1e6)
       // Assume the input partitions are roughly balanced and over-sample a little bit.
-      val sampleSizePerPartition = math.ceil(3.0 * sampleSize / rdd.partitions.size).toInt
+      val sampleSizePerPartition = math.ceil(3.0 * sampleSize / rdd.partitions.length).toInt
       val (numItems, sketched) = RangePartitioner.sketch(rdd.map(_._1), sampleSizePerPartition)
       if (numItems == 0L) {
         Array.empty
@@ -137,7 +137,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
             imbalancedPartitions += idx
           } else {
             // The weight is 1 over the sampling probability.
-            val weight = (n.toDouble / sample.size).toFloat
+            val weight = (n.toDouble / sample.length).toFloat
             for (key <- sample) {
               candidates += ((key, weight))
             }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/SparkContext.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 0e8b735..b503c61 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -23,8 +23,8 @@ import java.net.URI
 import java.util.{Arrays, Properties, UUID}
 import java.util.concurrent.ConcurrentMap
 import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
-import java.util.UUID.randomUUID
 
+import scala.annotation.tailrec
 import scala.collection.JavaConverters._
 import scala.collection.Map
 import scala.collection.generic.Growable
@@ -391,8 +391,8 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
 
     _conf.set("spark.executor.id", SparkContext.DRIVER_IDENTIFIER)
 
-    _jars = _conf.getOption("spark.jars").map(_.split(",")).map(_.filter(_.size != 0)).toSeq.flatten
-    _files = _conf.getOption("spark.files").map(_.split(",")).map(_.filter(_.size != 0))
+    _jars = _conf.getOption("spark.jars").map(_.split(",")).map(_.filter(_.nonEmpty)).toSeq.flatten
+    _files = _conf.getOption("spark.files").map(_.split(",")).map(_.filter(_.nonEmpty))
       .toSeq.flatten
 
     _eventLogDir =
@@ -2310,6 +2310,7 @@ object SparkContext extends Logging {
    * Create a task scheduler based on a given master URL.
    * Return a 2-tuple of the scheduler backend and the task scheduler.
    */
+  @tailrec
   private def createTaskScheduler(
       sc: SparkContext,
       master: String,

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/TaskEndReason.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/TaskEndReason.scala b/core/src/main/scala/org/apache/spark/TaskEndReason.scala
index c8f201e..509fb2e 100644
--- a/core/src/main/scala/org/apache/spark/TaskEndReason.scala
+++ b/core/src/main/scala/org/apache/spark/TaskEndReason.scala
@@ -248,7 +248,6 @@ case class ExecutorLostFailure(
     } else {
       "unrelated to the running tasks"
     }
-    s"ExecutorLostFailure (executor ${execId} exited due to an issue ${exitBehavior})"
     s"ExecutorLostFailure (executor ${execId} exited ${exitBehavior})" +
       reason.map { r => s" Reason: $r" }.getOrElse("")
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala b/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
index 2554201..a86ee66 100644
--- a/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
@@ -19,6 +19,7 @@ package org.apache.spark.deploy
 
 import java.net.{URI, URISyntaxException}
 
+import scala.annotation.tailrec
 import scala.collection.mutable.ListBuffer
 
 import org.apache.log4j.Level
@@ -49,6 +50,7 @@ private[deploy] class ClientArguments(args: Array[String]) {
 
   parse(args.toList)
 
+  @tailrec
   private def parse(args: List[String]): Unit = args match {
     case ("--cores" | "-c") :: IntParam(value) :: tail =>
       cores = value
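
On the tailrec items such as the one above: @tailrec makes the compiler verify that a self-recursive method compiles to a loop, turning an accidental loss of tail position into a compile error rather than a runtime StackOverflowError. A minimal sketch (not Spark code, just the same argument-parsing shape):

```scala
import scala.annotation.tailrec

// Not Spark code: the compiler rewrites this into a loop and will refuse to
// compile if a future edit moves the recursive call out of tail position.
@tailrec
def countArgs(args: List[String], acc: Int = 0): Int = args match {
  case _ :: tail => countArgs(tail, acc + 1)
  case Nil       => acc
}

println(countArgs(List("--cores", "4", "--memory", "2g")))   // 4
```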

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index d5a3383..7d7ddcc 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -22,6 +22,7 @@ import java.lang.reflect.{InvocationTargetException, Modifier, UndeclaredThrowab
 import java.net.URL
 import java.security.PrivilegedExceptionAction
 
+import scala.annotation.tailrec
 import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
 
 import org.apache.commons.lang3.StringUtils
@@ -150,6 +151,7 @@ object SparkSubmit {
    * Second, we use this launch environment to invoke the main method of the child
    * main class.
    */
+  @tailrec
   private def submit(args: SparkSubmitArguments): Unit = {
     val (childArgs, childClasspath, sysProps, childMainClass) = prepareSubmitEnvironment(args)
 
@@ -721,6 +723,7 @@ object SparkSubmit {
       throw new IllegalStateException("The main method in the given main class must be static")
     }
 
+    @tailrec
     def findCause(t: Throwable): Throwable = t match {
       case e: UndeclaredThrowableException =>
         if (e.getCause() != null) findCause(e.getCause()) else e

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
index d03bab3..fc3790f 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.deploy.history
 
+import scala.annotation.tailrec
+
 import org.apache.spark.{Logging, SparkConf}
 import org.apache.spark.util.Utils
 
@@ -29,6 +31,7 @@ private[history] class HistoryServerArguments(conf: SparkConf, args: Array[Strin
 
   parse(args.toList)
 
+  @tailrec
   private def parse(args: List[String]): Unit = {
     if (args.length == 1) {
       setLogDirectory(args.head)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala b/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
index 44cefbc..9cd7458 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.deploy.master
 
+import scala.annotation.tailrec
+
 import org.apache.spark.SparkConf
 import org.apache.spark.util.{IntParam, Utils}
 
@@ -49,6 +51,7 @@ private[master] class MasterArguments(args: Array[String], conf: SparkConf) {
     webUiPort = conf.get("spark.master.ui.port").toInt
   }
 
+  @tailrec
   private def parse(args: List[String]): Unit = args match {
     case ("--ip" | "-i") :: value :: tail =>
       Utils.checkHost(value, "ip no longer supported, please use hostname " + value)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala b/core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
index 39b2647..fb07c39 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
@@ -32,7 +32,7 @@ private[spark] class MasterSource(val master: Master) extends Source {
 
   // Gauge for alive worker numbers in cluster
   metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int]{
-    override def getValue: Int = master.workers.filter(_.state == WorkerState.ALIVE).size
+    override def getValue: Int = master.workers.count(_.state == WorkerState.ALIVE)
   })
 
   // Gauge for application numbers in cluster
@@ -42,6 +42,6 @@ private[spark] class MasterSource(val master: Master) extends Source {
 
   // Gauge for waiting application numbers in cluster
   metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
-    override def getValue: Int = master.apps.filter(_.state == ApplicationState.WAITING).size
+    override def getValue: Int = master.apps.count(_.state == ApplicationState.WAITING)
   })
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala b/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
index 540e802..b0cedef 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
@@ -50,7 +50,7 @@ private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer
 
   override def read[T: ClassTag](prefix: String): Seq[T] = {
     zk.getChildren.forPath(WORKING_DIR).asScala
-      .filter(_.startsWith(prefix)).map(deserializeFromFile[T]).flatten
+      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
   }
 
   override def close() {

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala
index f9b0279..363f4b8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala
@@ -107,18 +107,18 @@ private[ui] class MasterPage(parent: MasterWebUI) extends WebUIPage("") {
                   </li>
                 }.getOrElse { Seq.empty }
               }
-              <li><strong>Alive Workers:</strong> {aliveWorkers.size}</li>
+              <li><strong>Alive Workers:</strong> {aliveWorkers.length}</li>
               <li><strong>Cores in use:</strong> {aliveWorkers.map(_.cores).sum} Total,
                 {aliveWorkers.map(_.coresUsed).sum} Used</li>
               <li><strong>Memory in use:</strong>
                 {Utils.megabytesToString(aliveWorkers.map(_.memory).sum)} Total,
                 {Utils.megabytesToString(aliveWorkers.map(_.memoryUsed).sum)} Used</li>
               <li><strong>Applications:</strong>
-                {state.activeApps.size} Running,
-                {state.completedApps.size} Completed </li>
+                {state.activeApps.length} Running,
+                {state.completedApps.length} Completed </li>
               <li><strong>Drivers:</strong>
-                {state.activeDrivers.size} Running,
-                {state.completedDrivers.size} Completed </li>
+                {state.activeDrivers.length} Running,
+                {state.completedDrivers.length} Completed </li>
               <li><strong>Status:</strong> {state.status}</li>
             </ul>
           </div>

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala b/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
index 5accaf7..38935e3 100644
--- a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.deploy.mesos
 
+import scala.annotation.tailrec
+
 import org.apache.spark.SparkConf
 import org.apache.spark.util.{IntParam, Utils}
 
@@ -34,6 +36,7 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
 
   propertiesFile = Utils.loadDefaultSparkProperties(conf, propertiesFile)
 
+  @tailrec
   private def parse(args: List[String]): Unit = args match {
     case ("--host" | "-h") :: value :: tail =>
       Utils.checkHost(value, "Please use hostname " + value)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala b/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala
index 4ec6bfe..006e2e1 100644
--- a/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala
@@ -382,7 +382,7 @@ private[spark] class RestSubmissionClient(master: String) extends Logging {
       logWarning(s"Unable to connect to server ${masterUrl}.")
       lostMasters += masterUrl
     }
-    lostMasters.size >= masters.size
+    lostMasters.size >= masters.length
   }
 }
 
@@ -412,13 +412,13 @@ private[spark] object RestSubmissionClient {
   }
 
   def main(args: Array[String]): Unit = {
-    if (args.size < 2) {
+    if (args.length < 2) {
       sys.error("Usage: RestSubmissionClient [app resource] [main class] [app args*]")
       sys.exit(1)
     }
     val appResource = args(0)
     val mainClass = args(1)
-    val appArgs = args.slice(2, args.size)
+    val appArgs = args.slice(2, args.length)
     val conf = new SparkConf
     val env = filterSystemEnvironment(sys.env)
     run(appResource, mainClass, appArgs, conf, env)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala b/core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala
index a8b2f78..3b96488 100644
--- a/core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala
@@ -94,7 +94,7 @@ private[mesos] class MesosSubmitRequestServlet(
     val driverCores = sparkProperties.get("spark.driver.cores")
     val appArgs = request.appArgs
     val environmentVariables = request.environmentVariables
-    val name = request.sparkProperties.get("spark.app.name").getOrElse(mainClass)
+    val name = request.sparkProperties.getOrElse("spark.app.name", mainClass)
 
     // Construct driver description
     val conf = new SparkConf(false).setAll(sparkProperties)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
index df3c286..1c24c63 100755
--- a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
@@ -148,7 +148,7 @@ private[deploy] class Worker(
   // time so that we can register with all masters.
   private val registerMasterThreadPool = ThreadUtils.newDaemonCachedThreadPool(
     "worker-register-master-threadpool",
-    masterRpcAddresses.size // Make sure we can register with all masters at the same time
+    masterRpcAddresses.length // Make sure we can register with all masters at the same time
   )
 
   var coresUsed = 0
@@ -445,13 +445,12 @@ private[deploy] class Worker(
           // Create local dirs for the executor. These are passed to the executor via the
           // SPARK_EXECUTOR_DIRS environment variable, and deleted by the Worker when the
           // application finishes.
-          val appLocalDirs = appDirectories.get(appId).getOrElse {
+          val appLocalDirs = appDirectories.getOrElse(appId,
             Utils.getOrCreateLocalRootDirs(conf).map { dir =>
               val appDir = Utils.createDirectory(dir, namePrefix = "executor")
               Utils.chmod700(appDir)
               appDir.getAbsolutePath()
-            }.toSeq
-          }
+            }.toSeq)
           appDirectories(appId) = appLocalDirs
           val manager = new ExecutorRunner(
             appId,

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
index de3c7cd..391eb41 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
@@ -19,6 +19,8 @@ package org.apache.spark.deploy.worker
 
 import java.lang.management.ManagementFactory
 
+import scala.annotation.tailrec
+
 import org.apache.spark.util.{IntParam, MemoryParam, Utils}
 import org.apache.spark.SparkConf
 
@@ -63,6 +65,7 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
 
   checkWorkerMemory()
 
+  @tailrec
   private def parse(args: List[String]): Unit = args match {
     case ("--ip" | "-i") :: value :: tail =>
       Utils.checkHost(value, "ip no longer supported, please use hostname " + value)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/partial/ApproximateActionListener.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/partial/ApproximateActionListener.scala b/core/src/main/scala/org/apache/spark/partial/ApproximateActionListener.scala
index d25452d..b089bbd 100644
--- a/core/src/main/scala/org/apache/spark/partial/ApproximateActionListener.scala
+++ b/core/src/main/scala/org/apache/spark/partial/ApproximateActionListener.scala
@@ -38,7 +38,7 @@ private[spark] class ApproximateActionListener[T, U, R](
   extends JobListener {
 
   val startTime = System.currentTimeMillis()
-  val totalTasks = rdd.partitions.size
+  val totalTasks = rdd.partitions.length
   var finishedTasks = 0
   var failure: Option[Exception] = None             // Set if the job has failed (permanently)
   var resultObject: Option[PartialResult[R]] = None // Set if we've already returned a PartialResult

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala b/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala
index bfe1919..a163bbd 100644
--- a/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala
@@ -41,7 +41,7 @@ private[spark] class LocalCheckpointRDD[T: ClassTag](
   extends CheckpointRDD[T](sc) {
 
   def this(rdd: RDD[T]) {
-    this(rdd.context, rdd.id, rdd.partitions.size)
+    this(rdd.context, rdd.id, rdd.partitions.length)
   }
 
   protected override def getPartitions: Array[Partition] = {

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
index ba773e1..e2eaef5 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
@@ -22,6 +22,7 @@ import java.util.Properties
 import java.util.concurrent.TimeUnit
 import java.util.concurrent.atomic.AtomicInteger
 
+import scala.annotation.tailrec
 import scala.collection.Map
 import scala.collection.mutable.{HashMap, HashSet, Stack}
 import scala.concurrent.Await
@@ -469,6 +470,7 @@ class DAGScheduler(
    * all of that stage's ancestors.
    */
   private def updateJobIdStageIdMaps(jobId: Int, stage: Stage): Unit = {
+    @tailrec
     def updateJobIdStageIdMapsList(stages: List[Stage]) {
       if (stages.nonEmpty) {
         val s = stages.head

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index 29341df..8b2f497 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -620,7 +620,7 @@ private[spark] object TaskSchedulerImpl {
     while (found) {
       found = false
       for (key <- keyList) {
-        val containerList: ArrayBuffer[T] = map.get(key).getOrElse(null)
+        val containerList: ArrayBuffer[T] = map.getOrElse(key, null)
         assert(containerList != null)
         // Get the index'th entry for this host - if present
         if (index < containerList.size){

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index 2b0eab7..f1339d5 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -202,7 +202,7 @@ private[spark] class TaskSetManager(
                 ", but there are no executors alive there.")
           }
         }
-        case _ => Unit
+        case _ =>
       }
       pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
       for (rack <- sched.getRackForHost(loc.host)) {
@@ -828,7 +828,7 @@ private[spark] class TaskSetManager(
       val time = clock.getTimeMillis()
       val durations = taskInfos.values.filter(_.successful).map(_.duration).toArray
       Arrays.sort(durations)
-      val medianDuration = durations(min((0.5 * tasksSuccessful).round.toInt, durations.size - 1))
+      val medianDuration = durations(min((0.5 * tasksSuccessful).round.toInt, durations.length - 1))
       val threshold = max(SPECULATION_MULTIPLIER * medianDuration, 100)
       // TODO: Threshold should also look at standard deviation of task durations and have a lower
       // bound based on that.

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
index ea718a0..8b72da2 100644
--- a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
@@ -68,7 +68,7 @@ private[spark] class JavaDeserializationStream(in: InputStream, loader: ClassLoa
         // scalastyle:on classforname
       } catch {
         case e: ClassNotFoundException =>
-          JavaDeserializationStream.primitiveMappings.get(desc.getName).getOrElse(throw e)
+          JavaDeserializationStream.primitiveMappings.getOrElse(desc.getName, throw e)
       }
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/status/api/v1/AllRDDResource.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/AllRDDResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/AllRDDResource.scala
index 7750a09..5c03609 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/AllRDDResource.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/AllRDDResource.scala
@@ -61,7 +61,7 @@ private[spark] object AllRDDResource {
       .flatMap { _.rddBlocksById(rddId) }
       .sortWith { _._1.name < _._1.name }
       .map { case (blockId, status) =>
-        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
+        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
       }
 
     val dataDistribution = if (includeDetails) {

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/status/api/v1/OneJobResource.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/OneJobResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/OneJobResource.scala
index a0f6360..6531503 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/OneJobResource.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/OneJobResource.scala
@@ -30,7 +30,7 @@ private[v1] class OneJobResource(ui: SparkUI) {
   def oneJob(@PathParam("jobId") jobId: Int): JobData = {
     val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
       AllJobsResource.getStatusToJobs(ui)
-    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
+    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
     jobOpt.map { job =>
       AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
     }.getOrElse {

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala b/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala
index 8e2cfb2..43cd159 100644
--- a/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala
+++ b/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala
@@ -82,9 +82,7 @@ class StorageStatus(val blockManagerId: BlockManagerId, val maxMem: Long) {
   def rddBlocks: Map[BlockId, BlockStatus] = _rddBlocks.flatMap { case (_, blocks) => blocks }
 
   /** Return the blocks that belong to the given RDD stored in this block manager. */
-  def rddBlocksById(rddId: Int): Map[BlockId, BlockStatus] = {
-    _rddBlocks.get(rddId).getOrElse(Map.empty)
-  }
+  def rddBlocksById(rddId: Int): Map[BlockId, BlockStatus] = _rddBlocks.getOrElse(rddId, Map.empty)
 
   /** Add the given block to this storage status. If it already exists, overwrite it. */
   private[spark] def addBlock(blockId: BlockId, blockStatus: BlockStatus): Unit = {
@@ -143,7 +141,7 @@ class StorageStatus(val blockManagerId: BlockManagerId, val maxMem: Long) {
   def getBlock(blockId: BlockId): Option[BlockStatus] = {
     blockId match {
       case RDDBlockId(rddId, _) =>
-        _rddBlocks.get(rddId).map(_.get(blockId)).flatten
+        _rddBlocks.get(rddId).flatMap(_.get(blockId))
       case _ =>
         _nonRddBlocks.get(blockId)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
index 77c0bc8..f157a45 100644
--- a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
+++ b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
@@ -63,7 +63,7 @@ private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
       return
     }
     val stageIds = sc.statusTracker.getActiveStageIds()
-    val stages = stageIds.map(sc.statusTracker.getStageInfo).flatten.filter(_.numTasks() > 1)
+    val stages = stageIds.flatMap(sc.statusTracker.getStageInfo).filter(_.numTasks() > 1)
       .filter(now - _.submissionTime() > FIRST_DELAY).sortBy(_.stageId())
     if (stages.length > 0) {
       show(now, stages.take(3))  // display at most 3 stages in same time

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/ui/jobs/JobsTab.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/JobsTab.scala b/core/src/main/scala/org/apache/spark/ui/jobs/JobsTab.scala
index 77ca60b..2fd630a 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/JobsTab.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/JobsTab.scala
@@ -29,7 +29,7 @@ private[ui] class JobsTab(parent: SparkUI) extends SparkUITab(parent, "jobs") {
   val operationGraphListener = parent.operationGraphListener
 
   def isFairScheduler: Boolean =
-    jobProgresslistener.schedulingMode.exists(_ == SchedulingMode.FAIR)
+    jobProgresslistener.schedulingMode.contains(SchedulingMode.FAIR)
 
   attachPage(new AllJobsPage(this))
   attachPage(new JobPage(this))
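
Option.contains(x) is the library's direct form of exists(_ == x): it is true only when the
option is defined and holds exactly that value. A tiny sketch with a stand-in value for the
scheduling mode (hypothetical, not from the patch):

  val schedulingMode: Option[String] = Some("FAIR")
  val before = schedulingMode.exists(_ == "FAIR")
  val after  = schedulingMode.contains("FAIR")
  assert(before == after)  // both true; both false when schedulingMode is None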

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/ui/jobs/StagesTab.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagesTab.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StagesTab.scala
index 5989f00..ece5d0f 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StagesTab.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagesTab.scala
@@ -34,7 +34,7 @@ private[ui] class StagesTab(parent: SparkUI) extends SparkUITab(parent, "stages"
   attachPage(new StagePage(this))
   attachPage(new PoolPage(this))
 
-  def isFairScheduler: Boolean = progressListener.schedulingMode.exists(_ == SchedulingMode.FAIR)
+  def isFairScheduler: Boolean = progressListener.schedulingMode.contains(SchedulingMode.FAIR)
 
   def handleKillRequest(request: HttpServletRequest): Unit = {
     if (killEnabled && parent.securityManager.checkModifyPermissions(request.getRemoteUser)) {

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala b/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala
index 89119cd..bcae56e 100644
--- a/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala
+++ b/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala
@@ -52,9 +52,8 @@ private[ui] class RDDOperationGraphListener(conf: SparkConf) extends SparkListen
    * An empty list is returned if one or more of its stages has been cleaned up.
    */
   def getOperationGraphForJob(jobId: Int): Seq[RDDOperationGraph] = synchronized {
-    val skippedStageIds = jobIdToSkippedStageIds.get(jobId).getOrElse(Seq.empty)
-    val graphs = jobIdToStageIds.get(jobId)
-      .getOrElse(Seq.empty)
+    val skippedStageIds = jobIdToSkippedStageIds.getOrElse(jobId, Seq.empty)
+    val graphs = jobIdToStageIds.getOrElse(jobId, Seq.empty)
       .flatMap { sid => stageIdToGraph.get(sid) }
     // Mark any skipped stages as such
     graphs.foreach { g =>

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
index c9bb49b..76d7c6d 100644
--- a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
@@ -156,7 +156,7 @@ private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {
       streamBlockTableSubrow(block._1, replications.head, replications.size, true)
     } else {
       streamBlockTableSubrow(block._1, replications.head, replications.size, true) ++
-        replications.tail.map(streamBlockTableSubrow(block._1, _, replications.size, false)).flatten
+        replications.tail.flatMap(streamBlockTableSubrow(block._1, _, replications.size, false))
     }
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/util/Utils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala
index cfe247c..9688cca 100644
--- a/core/src/main/scala/org/apache/spark/util/Utils.scala
+++ b/core/src/main/scala/org/apache/spark/util/Utils.scala
@@ -27,6 +27,7 @@ import java.util.{Locale, Properties, Random, UUID}
 import java.util.concurrent._
 import javax.net.ssl.HttpsURLConnection
 
+import scala.annotation.tailrec
 import scala.collection.JavaConverters._
 import scala.collection.Map
 import scala.collection.mutable.ArrayBuffer
@@ -2219,6 +2220,7 @@ private[spark] object Utils extends Logging {
   /**
    * Return whether the specified file is a parent directory of the child file.
    */
+  @tailrec
   def isInDirectory(parent: File, child: File): Boolean = {
     if (child == null || parent == null) {
       return false
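
The @tailrec annotation added above changes nothing at runtime; it makes the compiler fail
the build if the method can no longer be compiled as a loop, so later edits cannot silently
reintroduce stack-consuming recursion. A simplified sketch of the pattern (not the Spark
method itself):

  import java.io.File
  import scala.annotation.tailrec

  object TailRecSketch {
    // Counts how many parent directories a file has; the recursive call is in tail position.
    @tailrec
    def depth(f: File, acc: Int = 0): Int =
      if (f == null) acc else depth(f.getParentFile, acc + 1)
  }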

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/main/scala/org/apache/spark/util/logging/RollingFileAppender.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/logging/RollingFileAppender.scala b/core/src/main/scala/org/apache/spark/util/logging/RollingFileAppender.scala
index 050ece1..a0eb05c 100644
--- a/core/src/main/scala/org/apache/spark/util/logging/RollingFileAppender.scala
+++ b/core/src/main/scala/org/apache/spark/util/logging/RollingFileAppender.scala
@@ -117,7 +117,7 @@ private[spark] class RollingFileAppender(
         }
       }).sorted
       val filesToBeDeleted = rolledoverFiles.take(
-        math.max(0, rolledoverFiles.size - maxRetainedFiles))
+        math.max(0, rolledoverFiles.length - maxRetainedFiles))
       filesToBeDeleted.foreach { file =>
         logInfo(s"Deleting file executor log file ${file.getAbsolutePath}")
         file.delete()
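
For arrays (and strings), .size is provided by an implicit wrapper and simply forwards to
.length, so calling .length directly returns the same value without the extra conversion;
the .size -> .length changes in this commit are purely that. A one-line sketch:

  val rolledOver = Array("stderr.1", "stderr.2", "stderr.3")
  assert(rolledOver.size == rolledOver.length)  // .size goes through ArrayOps, .length does not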

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java b/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
index 776a299..127789b 100644
--- a/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
+++ b/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
@@ -73,37 +73,37 @@ public class TaskMemoryManagerSuite {
     TestMemoryConsumer c1 = new TestMemoryConsumer(manager);
     TestMemoryConsumer c2 = new TestMemoryConsumer(manager);
     c1.use(100);
-    assert(c1.getUsed() == 100);
+    Assert.assertEquals(100, c1.getUsed());
     c2.use(100);
-    assert(c2.getUsed() == 100);
-    assert(c1.getUsed() == 0);  // spilled
+    Assert.assertEquals(100, c2.getUsed());
+    Assert.assertEquals(0, c1.getUsed());  // spilled
     c1.use(100);
-    assert(c1.getUsed() == 100);
-    assert(c2.getUsed() == 0);  // spilled
+    Assert.assertEquals(100, c1.getUsed());
+    Assert.assertEquals(0, c2.getUsed());  // spilled
 
     c1.use(50);
-    assert(c1.getUsed() == 50);  // spilled
-    assert(c2.getUsed() == 0);
+    Assert.assertEquals(50, c1.getUsed());  // spilled
+    Assert.assertEquals(0, c2.getUsed());
     c2.use(50);
-    assert(c1.getUsed() == 50);
-    assert(c2.getUsed() == 50);
+    Assert.assertEquals(50, c1.getUsed());
+    Assert.assertEquals(50, c2.getUsed());
 
     c1.use(100);
-    assert(c1.getUsed() == 100);
-    assert(c2.getUsed() == 0);  // spilled
+    Assert.assertEquals(100, c1.getUsed());
+    Assert.assertEquals(0, c2.getUsed());  // spilled
 
     c1.free(20);
-    assert(c1.getUsed() == 80);
+    Assert.assertEquals(80, c1.getUsed());
     c2.use(10);
-    assert(c1.getUsed() == 80);
-    assert(c2.getUsed() == 10);
+    Assert.assertEquals(80, c1.getUsed());
+    Assert.assertEquals(10, c2.getUsed());
     c2.use(100);
-    assert(c2.getUsed() == 100);
-    assert(c1.getUsed() == 0);  // spilled
+    Assert.assertEquals(100, c2.getUsed());
+    Assert.assertEquals(0, c1.getUsed());  // spilled
 
     c1.free(0);
     c2.free(100);
-    assert(manager.cleanUpAllAllocatedMemory() == 0);
+    Assert.assertEquals(0, manager.cleanUpAllAllocatedMemory());
   }
 
   @Test
@@ -114,7 +114,7 @@ public class TaskMemoryManagerSuite {
       .set("spark.unsafe.offHeap", "true")
       .set("spark.memory.offHeap.size", "1000");
     final TaskMemoryManager manager = new TaskMemoryManager(new TestMemoryManager(conf), 0);
-    assert(manager.tungstenMemoryMode == MemoryMode.OFF_HEAP);
+    Assert.assertSame(MemoryMode.OFF_HEAP, manager.tungstenMemoryMode);
   }
 
 }
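
The assert -> Assert.assertEquals changes above matter because Java assert statements run
only when assertions are enabled on the JVM (-ea), and on failure they say nothing about
expected vs. actual values; the JUnit Assert methods always execute and report both.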

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
index eb1da8e..b4fa33f 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
@@ -48,7 +48,7 @@ public class ShuffleInMemorySorterSuite {
   public void testSortingEmptyInput() {
     final ShuffleInMemorySorter sorter = new ShuffleInMemorySorter(consumer, 100);
     final ShuffleInMemorySorter.ShuffleSorterIterator iter = sorter.getSortedIterator();
-    assert(!iter.hasNext());
+    Assert.assertFalse(iter.hasNext());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index 876c3a2..add9d93 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -139,7 +139,7 @@ public class UnsafeShuffleWriterSuite {
       new Answer<InputStream>() {
         @Override
         public InputStream answer(InvocationOnMock invocation) throws Throwable {
-          assert (invocation.getArguments()[0] instanceof TempShuffleBlockId);
+          assertTrue(invocation.getArguments()[0] instanceof TempShuffleBlockId);
           InputStream is = (InputStream) invocation.getArguments()[1];
           if (conf.getBoolean("spark.shuffle.compress", true)) {
             return CompressionCodec$.MODULE$.createCodec(conf).compressedInputStream(is);
@@ -154,7 +154,7 @@ public class UnsafeShuffleWriterSuite {
       new Answer<OutputStream>() {
         @Override
         public OutputStream answer(InvocationOnMock invocation) throws Throwable {
-          assert (invocation.getArguments()[0] instanceof TempShuffleBlockId);
+          assertTrue(invocation.getArguments()[0] instanceof TempShuffleBlockId);
           OutputStream os = (OutputStream) invocation.getArguments()[1];
           if (conf.getBoolean("spark.shuffle.compress", true)) {
             return CompressionCodec$.MODULE$.createCodec(conf).compressedOutputStream(os);
@@ -252,7 +252,7 @@ public class UnsafeShuffleWriterSuite {
     createWriter(false).stop(false);
   }
 
-  class PandaException extends RuntimeException {
+  static class PandaException extends RuntimeException {
   }
 
   @Test(expected=PandaException.class)
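
Making PandaException static is safe here because the class never touches the enclosing test
instance; a non-static inner class carries an implicit reference to that instance, so
declaring it static drops the hidden field and lets it be constructed independently of the
outer class.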

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
index 32f5a1a..492fe49 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
@@ -323,23 +323,23 @@ public class UnsafeExternalSorterSuite {
       record[0] = (long) i;
       sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0);
     }
-    assert(sorter.getNumberOfAllocatedPages() >= 2);
+    assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
     UnsafeExternalSorter.SpillableIterator iter =
       (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
     int lastv = 0;
     for (int i = 0; i < n / 3; i++) {
       iter.hasNext();
       iter.loadNext();
-      assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+      assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
       lastv = i;
     }
-    assert(iter.spill() > 0);
-    assert(iter.spill() == 0);
-    assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv);
+    assertTrue(iter.spill() > 0);
+    assertEquals(0, iter.spill());
+    assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv);
     for (int i = n / 3; i < n; i++) {
       iter.hasNext();
       iter.loadNext();
-      assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+      assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
     }
     sorter.cleanupResources();
     assertSpillFilesWereCleanedUp();
@@ -355,15 +355,15 @@ public class UnsafeExternalSorterSuite {
       record[0] = (long) i;
       sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0);
     }
-    assert(sorter.getNumberOfAllocatedPages() >= 2);
+    assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
     UnsafeExternalSorter.SpillableIterator iter =
       (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
-    assert(iter.spill() > 0);
-    assert(iter.spill() == 0);
+    assertTrue(iter.spill() > 0);
+    assertEquals(0, iter.spill());
     for (int i = 0; i < n; i++) {
       iter.hasNext();
       iter.loadNext();
-      assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+      assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
     }
     sorter.cleanupResources();
     assertSpillFilesWereCleanedUp();
@@ -394,7 +394,7 @@ public class UnsafeExternalSorterSuite {
     for (int i = 0; i < n; i++) {
       iter.hasNext();
       iter.loadNext();
-      assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+      assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
     }
     sorter.cleanupResources();
     assertSpillFilesWereCleanedUp();

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
index 8e557ec..ff41768 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
@@ -19,6 +19,7 @@ package org.apache.spark.util.collection.unsafe.sort;
 
 import java.util.Arrays;
 
+import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.spark.HashPartitioner;
@@ -54,7 +55,7 @@ public class UnsafeInMemorySorterSuite {
       mock(PrefixComparator.class),
       100);
     final UnsafeSorterIterator iter = sorter.getSortedIterator();
-    assert(!iter.hasNext());
+    Assert.assertFalse(iter.hasNext());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
index 8acd043..4ff8ae5 100644
--- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -347,7 +347,7 @@ private class SaveInfoListener extends SparkListener {
   def getCompletedStageInfos: Seq[StageInfo] = completedStageInfos.toArray.toSeq
   def getCompletedTaskInfos: Seq[TaskInfo] = completedTaskInfos.values.flatten.toSeq
   def getCompletedTaskInfos(stageId: StageId, stageAttemptId: StageAttemptId): Seq[TaskInfo] =
-    completedTaskInfos.get((stageId, stageAttemptId)).getOrElse(Seq.empty[TaskInfo])
+    completedTaskInfos.getOrElse((stageId, stageAttemptId), Seq.empty[TaskInfo])
 
   /**
    * If `jobCompletionCallback` is set, block until the next call has finished.

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
index 2fe99e3..79881f3 100644
--- a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
@@ -237,7 +237,7 @@ class SparkConfSuite extends SparkFunSuite with LocalSparkContext with ResetSyst
     conf.set(newName, "4")
     assert(conf.get(newName) === "4")
 
-    val count = conf.getAll.filter { case (k, v) => k.startsWith("spark.history.") }.size
+    val count = conf.getAll.count { case (k, v) => k.startsWith("spark.history.") }
     assert(count === 4)
 
     conf.set("spark.yarn.applicationMaster.waitTries", "42")
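
count(p) folds the predicate and the tally into a single pass, whereas filter(p).size first
builds the collection of matches and then measures it. A standalone sketch with invented
configuration entries:

  val entries = Seq("spark.history.ui.port" -> "18080", "spark.app.name" -> "demo")
  val before = entries.filter { case (k, _) => k.startsWith("spark.history.") }.size
  val after  = entries.count { case (k, _) => k.startsWith("spark.history.") }
  assert(before == after)  // both 1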

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
index e7cc161..31ce948 100644
--- a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
@@ -101,7 +101,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val data = 1 until 100
     val slices = ParallelCollectionRDD.slice(data, 3)
     assert(slices.size === 3)
-    assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+    assert(slices.map(_.size).sum === 99)
     assert(slices.forall(_.isInstanceOf[Range]))
   }
 
@@ -109,7 +109,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val data = 1 to 100
     val slices = ParallelCollectionRDD.slice(data, 3)
     assert(slices.size === 3)
-    assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+    assert(slices.map(_.size).sum === 100)
     assert(slices.forall(_.isInstanceOf[Range]))
   }
 
@@ -202,7 +202,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val data = 1L until 100L
     val slices = ParallelCollectionRDD.slice(data, 3)
     assert(slices.size === 3)
-    assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+    assert(slices.map(_.size).sum === 99)
     assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
   }
 
@@ -210,7 +210,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val data = 1L to 100L
     val slices = ParallelCollectionRDD.slice(data, 3)
     assert(slices.size === 3)
-    assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+    assert(slices.map(_.size).sum === 100)
     assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
   }
 
@@ -218,7 +218,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val data = 1.0 until 100.0 by 1.0
     val slices = ParallelCollectionRDD.slice(data, 3)
     assert(slices.size === 3)
-    assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+    assert(slices.map(_.size).sum === 99)
     assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
   }
 
@@ -226,7 +226,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val data = 1.0 to 100.0 by 1.0
     val slices = ParallelCollectionRDD.slice(data, 3)
     assert(slices.size === 3)
-    assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+    assert(slices.map(_.size).sum === 100)
     assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
   }
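
Replacing reduceLeft(_ + _) with .sum is behaviour-preserving for these non-empty slices;
the only difference is the empty case, where reduceLeft throws and sum returns zero. A short
sketch:

  val sliceSizes = Seq(33, 33, 33)
  assert(sliceSizes.reduceLeft(_ + _) == sliceSizes.sum)  // both 99
  assert(Seq.empty[Int].sum == 0)                         // reduceLeft would throw here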
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
index 80347b8..24daeda 100644
--- a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
@@ -54,16 +54,16 @@ class RDDSuite extends SparkFunSuite with SharedSparkContext {
     assert(!nums.isEmpty())
     assert(nums.max() === 4)
     assert(nums.min() === 1)
-    val partitionSums = nums.mapPartitions(iter => Iterator(iter.reduceLeft(_ + _)))
+    val partitionSums = nums.mapPartitions(iter => Iterator(iter.sum))
     assert(partitionSums.collect().toList === List(3, 7))
 
     val partitionSumsWithSplit = nums.mapPartitionsWithIndex {
-      case(split, iter) => Iterator((split, iter.reduceLeft(_ + _)))
+      case(split, iter) => Iterator((split, iter.sum))
     }
     assert(partitionSumsWithSplit.collect().toList === List((0, 3), (1, 7)))
 
     val partitionSumsWithIndex = nums.mapPartitionsWithIndex {
-      case(split, iter) => Iterator((split, iter.reduceLeft(_ + _)))
+      case(split, iter) => Iterator((split, iter.sum))
     }
     assert(partitionSumsWithIndex.collect().toList === List((0, 3), (1, 7)))
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
index 56e0f01..759d52f 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
@@ -79,7 +79,7 @@ class MapStatusSuite extends SparkFunSuite {
 
   test("HighlyCompressedMapStatus: estimated size should be the average non-empty block size") {
     val sizes = Array.tabulate[Long](3000) { i => i.toLong }
-    val avg = sizes.sum / sizes.filter(_ != 0).length
+    val avg = sizes.sum / sizes.count(_ != 0)
     val loc = BlockManagerId("a", "b", 10)
     val status = MapStatus(loc, sizes)
     val status1 = compressAndDecompressMapStatus(status)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
index b5385c1..935e280 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
@@ -243,7 +243,7 @@ class TaskResultGetterSuite extends SparkFunSuite with BeforeAndAfter with Local
     val resAfter = captor.getValue
     val resSizeBefore = resBefore.accumUpdates.find(_.name == Some(RESULT_SIZE)).flatMap(_.update)
     val resSizeAfter = resAfter.accumUpdates.find(_.name == Some(RESULT_SIZE)).flatMap(_.update)
-    assert(resSizeBefore.exists(_ == 0L))
+    assert(resSizeBefore.contains(0L))
     assert(resSizeAfter.exists(_.toString.toLong > 0L))
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
index fdacd8c..cf9f9da 100644
--- a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
@@ -166,7 +166,7 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte
     writer.stop( /* success = */ true)
     assert(temporaryFilesCreated.nonEmpty)
     assert(writer.getPartitionLengths.sum === outputFile.length())
-    assert(writer.getPartitionLengths.filter(_ == 0L).size === 4) // should be 4 zero length files
+    assert(writer.getPartitionLengths.count(_ == 0L) === 4) // should be 4 zero length files
     assert(temporaryFilesCreated.count(_.exists()) === 0) // check that temporary files were deleted
     val shuffleWriteMetrics = taskContext.taskMetrics().shuffleWriteMetrics.get
     assert(shuffleWriteMetrics.bytesWritten === outputFile.length())

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala b/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
index ddd5edf..0c8b8cf 100644
--- a/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
+++ b/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
@@ -23,9 +23,7 @@ import org.apache.spark.SparkConf
  * Customized SparkConf that allows env variables to be overridden.
  */
 class SparkConfWithEnv(env: Map[String, String]) extends SparkConf(false) {
-  override def getenv(name: String): String = {
-    env.get(name).getOrElse(super.getenv(name))
-  }
+  override def getenv(name: String): String = env.getOrElse(name, super.getenv(name))
 
   override def clone: SparkConf = {
     new SparkConfWithEnv(env).setAll(getAll)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
index cf77466..7bb70d0 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
@@ -60,7 +60,9 @@ class JavaSampleActorReceiver<T> extends JavaActorReceiver {
 
   @Override
   public void onReceive(Object msg) throws Exception {
-    store((T) msg);
+    @SuppressWarnings("unchecked")
+    T msgT = (T) msg;
+    store(msgT);
   }
 
   @Override
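
The cast above is unavoidable because T is erased at runtime; assigning the result to a
local variable lets @SuppressWarnings("unchecked") be scoped to that single declaration
rather than to the whole onReceive method, which is the usual way to narrow such warnings.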

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala b/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
index b26db0b..e37a3fa 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
@@ -88,7 +88,7 @@ object DFSReadWriteTest {
   def runLocalWordCount(fileContents: List[String]): Int = {
     fileContents.flatMap(_.split(" "))
       .flatMap(_.split("\t"))
-      .filter(_.size > 0)
+      .filter(_.nonEmpty)
       .groupBy(w => w)
       .mapValues(_.size)
       .values
@@ -119,7 +119,7 @@ object DFSReadWriteTest {
     val dfsWordCount = readFileRDD
       .flatMap(_.split(" "))
       .flatMap(_.split("\t"))
-      .filter(_.size > 0)
+      .filter(_.nonEmpty)
       .map(w => (w, 1))
       .countByKey()
       .values
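
filter(_.size > 0) computes each word's length only to compare it with zero, while
filter(_.nonEmpty) states the same condition directly. A small sketch:

  val words = "the quick\tbrown fox".split(" ").flatMap(_.split("\t"))
  val before = words.filter(_.size > 0)
  val after  = words.filter(_.nonEmpty)
  assert(before.sameElements(after))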

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
index bec61f3..a2d59a1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
@@ -26,7 +26,7 @@ import org.apache.spark.util.Utils
   * test driver submission in the standalone scheduler. */
 object DriverSubmissionTest {
   def main(args: Array[String]) {
-    if (args.size < 1) {
+    if (args.length < 1) {
       println("Usage: DriverSubmissionTest <seconds-to-sleep>")
       System.exit(0)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala b/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
index a797111..134c3d1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
@@ -46,7 +46,7 @@ object MultiBroadcastTest {
     val barr1 = sc.broadcast(arr1)
     val barr2 = sc.broadcast(arr2)
     val observedSizes: RDD[(Int, Int)] = sc.parallelize(1 to 10, slices).map { _ =>
-      (barr1.value.size, barr2.value.size)
+      (barr1.value.length, barr2.value.length)
     }
     // Collect the small RDD so we can print the observed sizes locally.
     observedSizes.collect().foreach(i => println(i))

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
index 69799b7..4263680 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
@@ -58,7 +58,7 @@ object SparkALS {
   }
 
   def update(i: Int, m: RealVector, us: Array[RealVector], R: RealMatrix) : RealVector = {
-    val U = us.size
+    val U = us.length
     val F = us(0).getDimension
     var XtX: RealMatrix = new Array2DRowRealMatrix(F, F)
     var Xty: RealVector = new ArrayRealVector(F)

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index 038b2fe..e89d555 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -118,7 +118,7 @@ object LDAExample {
       preprocess(sc, params.input, params.vocabSize, params.stopwordFile)
     corpus.cache()
     val actualCorpusSize = corpus.count()
-    val actualVocabSize = vocabArray.size
+    val actualVocabSize = vocabArray.length
     val preprocessElapsed = (System.nanoTime() - preprocessStart) / 1e9
 
     println()

http://git-wip-us.apache.org/repos/asf/spark/blob/e97fc7f1/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
index c4e5e96..011db4f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
@@ -79,7 +79,7 @@ object SampledRDDs {
     val sampledRDD = examples.sample(withReplacement = true, fraction = fraction)
     println(s"  RDD.sample(): sample has ${sampledRDD.count()} examples")
     val sampledArray = examples.takeSample(withReplacement = true, num = expectedSampleSize)
-    println(s"  RDD.takeSample(): sample has ${sampledArray.size} examples")
+    println(s"  RDD.takeSample(): sample has ${sampledArray.length} examples")
 
     println()
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org

