spark-commits mailing list archives

From dav...@apache.org
Subject spark git commit: [SPARK-14419] [MINOR] coding style cleanup
Date Mon, 11 Apr 2016 01:10:52 GMT
Repository: spark
Updated Branches:
  refs/heads/master a7ce473bd -> fbf8d0088


[SPARK-14419] [MINOR] coding style cleanup

## What changes were proposed in this pull request?

Making the coding style in `TungstenAggregate.scala` and `HashedRelation.scala` more consistent: single-expression method bodies lose their surrounding braces, and an unneeded `s""` interpolator becomes a plain `""`. A sketch of the pattern follows.
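
A minimal sketch of the convention this patch applies (the names `StyleExample`, `buffer`, `estimatedSizeOld`, and `emptyInit` are hypothetical illustrations, not code from this diff):

```scala
object StyleExample {
  private val buffer = new Array[Byte](128)

  // Before: a single-expression body wrapped in braces.
  def estimatedSizeOld: Long = {
    buffer.length
  }

  // After: the single expression moves onto the definition line.
  def estimatedSize: Long = buffer.length

  // A string literal with nothing to interpolate drops the s prefix.
  val emptyInit: String = ""  // rather than s""
}
```

Both forms compile to the same bytecode; the change is purely stylistic.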

## How was this patch tested?

Existing tests.

Author: Davies Liu <davies@databricks.com>

Closes #12289 from davies/cleanup_style.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/fbf8d008
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/fbf8d008
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/fbf8d008

Branch: refs/heads/master
Commit: fbf8d008833c985d0e222dd2360c7f7375caa68a
Parents: a7ce473
Author: Davies Liu <davies@databricks.com>
Authored: Sun Apr 10 18:10:44 2016 -0700
Committer: Davies Liu <davies.liu@gmail.com>
Committed: Sun Apr 10 18:10:44 2016 -0700

----------------------------------------------------------------------
 .../execution/aggregate/TungstenAggregate.scala |  2 +-
 .../sql/execution/joins/HashedRelation.scala    | 35 +++++++-------------
 2 files changed, 13 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/fbf8d008/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala
index 692fef7..2535920 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala
@@ -454,7 +454,7 @@ case class TungstenAggregate(
     val thisPlan = ctx.addReferenceObj("plan", this)
     hashMapTerm = ctx.freshName("hashMap")
     val hashMapClassName = classOf[UnsafeFixedWidthAggregationMap].getName
-    ctx.addMutableState(hashMapClassName, hashMapTerm, s"")
+    ctx.addMutableState(hashMapClassName, hashMapTerm, "")
     sorterTerm = ctx.freshName("sorter")
     ctx.addMutableState(classOf[UnsafeKVExternalSorter].getName, sorterTerm, "")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/fbf8d008/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
index 68b5486..0427db4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
@@ -122,13 +122,12 @@ private[joins] class UnsafeHashedRelation(
 
   override def keyIsUnique: Boolean = binaryMap.numKeys() == binaryMap.numValues()
 
-  override def asReadOnlyCopy(): UnsafeHashedRelation =
+  override def asReadOnlyCopy(): UnsafeHashedRelation = {
     new UnsafeHashedRelation(numFields, binaryMap)
-
-  override def estimatedSize: Long = {
-    binaryMap.getTotalMemoryConsumption
   }
 
+  override def estimatedSize: Long = binaryMap.getTotalMemoryConsumption
+
   // re-used in get()/getValue()
   var resultRow = new UnsafeRow(numFields)
 
@@ -374,8 +373,9 @@ private[execution] final class LongToUnsafeRowMap(var mm: TaskMemoryManager, cap
     // do not support spilling
     val got = mm.acquireExecutionMemory(size, MemoryMode.ON_HEAP, this)
     if (got < size) {
-      mm.releaseExecutionMemory(got, MemoryMode.ON_HEAP, this)
-      throw new SparkException(s"Can't acquire $size bytes memory to build hash relation")
+      freeMemory(got)
+      throw new SparkException(s"Can't acquire $size bytes memory to build hash relation, " +
+        s"got $got bytes")
     }
   }
 
@@ -396,9 +396,7 @@ private[execution] final class LongToUnsafeRowMap(var mm: TaskMemoryManager, cap
 
   init()
 
-  def spill(size: Long, trigger: MemoryConsumer): Long = {
-    0L
-  }
+  def spill(size: Long, trigger: MemoryConsumer): Long = 0L
 
   /**
    * Returns whether all the keys are unique.
@@ -408,9 +406,7 @@ private[execution] final class LongToUnsafeRowMap(var mm: TaskMemoryManager, cap
   /**
    * Returns total memory consumption.
    */
-  def getTotalMemoryConsumption: Long = {
-    array.length * 8 + page.length
-  }
+  def getTotalMemoryConsumption: Long = array.length * 8 + page.length
 
   /**
    * Returns the first slot of array that store the keys (sparse mode).
@@ -423,9 +419,7 @@ private[execution] final class LongToUnsafeRowMap(var mm: TaskMemoryManager, cap
   /**
    * Returns the next probe in the array.
    */
-  private def nextSlot(pos: Int): Int = {
-    (pos + 2) & mask
-  }
+  private def nextSlot(pos: Int): Int = (pos + 2) & mask
 
   private def getRow(address: Long, resultRow: UnsafeRow): UnsafeRow = {
     val offset = address >>> 32
@@ -674,9 +668,7 @@ private[joins] class LongHashedRelation(
 
   override def asReadOnlyCopy(): LongHashedRelation = new LongHashedRelation(nFields, map)
 
-  override def estimatedSize: Long = {
-    map.getTotalMemoryConsumption
-  }
+  override def estimatedSize: Long = map.getTotalMemoryConsumption
 
   override def get(key: InternalRow): Iterator[InternalRow] = {
     if (key.isNullAt(0)) {
@@ -694,12 +686,9 @@ private[joins] class LongHashedRelation(
     }
   }
 
-  override def get(key: Long): Iterator[InternalRow] =
-    map.get(key, resultRow)
+  override def get(key: Long): Iterator[InternalRow] = map.get(key, resultRow)
 
-  override def getValue(key: Long): InternalRow = {
-    map.getValue(key, resultRow)
-  }
+  override def getValue(key: Long): InternalRow = map.getValue(key, resultRow)
 
   override def keyIsUnique: Boolean = map.keyIsUnique
 



