spark-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sro...@apache.org
Subject spark git commit: [SPARK-14756][CORE] Use parseLong instead of valueOf
Date Tue, 26 Apr 2016 10:49:09 GMT
Repository: spark
Updated Branches:
  refs/heads/master f70e4fff0 -> de6e63342


[SPARK-14756][CORE] Use parseLong instead of valueOf

## What changes were proposed in this pull request?

Use Long.parseLong, which returns a primitive.
Using a series of append() calls avoids the creation of an extra StringBuilder instance.

## How was this patch tested?

Unit tests

Author: Azeem Jiva <azeemj@gmail.com>

Closes #12520 from javawithjiva/minor.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/de6e6334
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/de6e6334
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/de6e6334

Branch: refs/heads/master
Commit: de6e633420aba1fe5d806a2725a95e610699ae7d
Parents: f70e4ff
Author: Azeem Jiva <azeemj@gmail.com>
Authored: Tue Apr 26 11:49:04 2016 +0100
Committer: Sean Owen <sowen@cloudera.com>
Committed: Tue Apr 26 11:49:04 2016 +0100

----------------------------------------------------------------------
 .../java/org/apache/spark/unsafe/types/CalendarInterval.java | 8 ++++----
 .../main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala | 2 +-
 .../spark/examples/mllib/JavaStreamingTestExample.java       | 8 ++++----
 .../spark/sql/execution/datasources/PartitioningUtils.scala  | 6 +++---
 .../org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala   | 2 +-
 5 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/de6e6334/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
----------------------------------------------------------------------
diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
index 62edf6c..518ed64 100644
--- a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
+++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
@@ -62,7 +62,7 @@ public final class CalendarInterval implements Serializable {
     if (s == null) {
       return 0;
     } else {
-      return Long.valueOf(s);
+      return Long.parseLong(s);
     }
   }
 
@@ -91,7 +91,7 @@ public final class CalendarInterval implements Serializable {
       String s, long minValue, long maxValue) throws IllegalArgumentException {
     long result = 0;
     if (s != null) {
-      result = Long.valueOf(s);
+      result = Long.parseLong(s);
       if (result < minValue || result > maxValue) {
         throw new IllegalArgumentException(String.format("%s %d outside range [%d, %d]",
           fieldName, result, minValue, maxValue));
@@ -218,7 +218,7 @@ public final class CalendarInterval implements Serializable {
           result = new CalendarInterval(0, millisecond * MICROS_PER_MILLI);
 
         } else if (unit.equals("microsecond")) {
-          long micros = Long.valueOf(m.group(1));
+          long micros = Long.parseLong(m.group(1));
           result = new CalendarInterval(0, micros);
         }
       } catch (Exception e) {
@@ -318,7 +318,7 @@ public final class CalendarInterval implements Serializable {
 
   private void appendUnit(StringBuilder sb, long value, String unit) {
     if (value != 0) {
-      sb.append(" " + value + " " + unit + "s");
+      sb.append(' ').append(value).append(' ').append(unit).append('s');
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/de6e6334/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index cda9d38..2e9e45a 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -384,7 +384,7 @@ object SparkHadoopUtil {
 
   def get: SparkHadoopUtil = {
     // Check each time to support changing to/from YARN
-    val yarnMode = java.lang.Boolean.valueOf(
+    val yarnMode = java.lang.Boolean.parseBoolean(
         System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
     if (yarnMode) {
       yarn

http://git-wip-us.apache.org/repos/asf/spark/blob/de6e6334/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
index df90199..cfaa577 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
@@ -66,8 +66,8 @@ public class JavaStreamingTestExample {
     }
 
     String dataDir = args[0];
-    Duration batchDuration = Seconds.apply(Long.valueOf(args[1]));
-    int numBatchesTimeout = Integer.valueOf(args[2]);
+    Duration batchDuration = Seconds.apply(Long.parseLong(args[1]));
+    int numBatchesTimeout = Integer.parseInt(args[2]);
 
     SparkConf conf = new SparkConf().setMaster("local").setAppName("StreamingTestExample");
     JavaStreamingContext ssc = new JavaStreamingContext(conf, batchDuration);
@@ -80,8 +80,8 @@ public class JavaStreamingTestExample {
         @Override
         public BinarySample call(String line) {
           String[] ts = line.split(",");
-          boolean label = Boolean.valueOf(ts[0]);
-          double value = Double.valueOf(ts[1]);
+          boolean label = Boolean.parseBoolean(ts[0]);
+          double value = Double.parseDouble(ts[1]);
           return new BinarySample(label, value);
         }
       });

http://git-wip-us.apache.org/repos/asf/spark/blob/de6e6334/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 3ac2ff4..1065bb1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -441,9 +441,9 @@ private[sql] object PartitioningUtils {
       val c = path.charAt(i)
       if (c == '%' && i + 2 < path.length) {
         val code: Int = try {
-          Integer.valueOf(path.substring(i + 1, i + 3), 16)
-        } catch { case e: Exception =>
-          -1: Integer
+          Integer.parseInt(path.substring(i + 1, i + 3), 16)
+        } catch {
+          case _: Exception => -1
         }
         if (code >= 0) {
           sb.append(code.asInstanceOf[Char])

http://git-wip-us.apache.org/repos/asf/spark/blob/de6e6334/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
----------------------------------------------------------------------
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
index 4b36da3..ee002f6 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
@@ -345,7 +345,7 @@ object YarnSparkHadoopUtil {
   val RM_REQUEST_PRIORITY = Priority.newInstance(1)
 
   def get: YarnSparkHadoopUtil = {
-    val yarnMode = java.lang.Boolean.valueOf(
+    val yarnMode = java.lang.Boolean.parseBoolean(
       System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
     if (!yarnMode) {
       throw new SparkException("YarnSparkHadoopUtil is not available in non-YARN mode!")


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


Mime
View raw message