spark-reviews mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From GitBox <...@apache.org>
Subject [GitHub] [spark] cloud-fan commented on a change in pull request #26434: [SPARK-29544] [SQL] optimize skewed partition based on data size
Date Fri, 03 Jan 2020 15:31:31 GMT
cloud-fan commented on a change in pull request #26434: [SPARK-29544] [SQL] optimize skewed
partition based on data size
URL: https://github.com/apache/spark/pull/26434#discussion_r362850814
 
 

 ##########
 File path: sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ReduceNumShufflePartitions.scala
 ##########
 @@ -141,39 +149,36 @@ case class ReduceNumShufflePartitions(conf: SQLConf) extends Rule[SparkPlan] {
       distinctNumPreShufflePartitions.length == 1,
       "There should be only one distinct value of the number pre-shuffle partitions " +
         "among registered Exchange operator.")
-    val numPreShufflePartitions = distinctNumPreShufflePartitions.head
 
     val partitionStartIndices = ArrayBuffer[Int]()
-    // The first element of partitionStartIndices is always 0.
-    partitionStartIndices += 0
-
-    var postShuffleInputSize = 0L
-
-    var i = 0
-    while (i < numPreShufflePartitions) {
-      // We calculate the total size of ith pre-shuffle partitions from all pre-shuffle stages.
-      // Then, we add the total size to postShuffleInputSize.
-      var nextShuffleInputSize = 0L
-      var j = 0
-      while (j < mapOutputStatistics.length) {
-        nextShuffleInputSize += mapOutputStatistics(j).bytesByPartitionId(i)
-        j += 1
-      }
-
-      // If including the nextShuffleInputSize would exceed the target partition size, then start a
-      // new partition.
-      if (i > 0 && postShuffleInputSize + nextShuffleInputSize > targetPostShuffleInputSize) {
-        partitionStartIndices += i
-        // reset postShuffleInputSize.
-        postShuffleInputSize = nextShuffleInputSize
-      } else {
-        postShuffleInputSize += nextShuffleInputSize
-      }
-
-      i += 1
+    val partitionEndIndices = ArrayBuffer[Int]()
+    val numPartitions = distinctNumPreShufflePartitions.head
+    val includedPartitions = (0 until numPartitions).filter(!excludedPartitions.contains(_))
+    val firstStartIndex = includedPartitions(0)
+    partitionStartIndices += firstStartIndex
+    var postShuffleInputSize = mapOutputStatistics.map(_.bytesByPartitionId(firstStartIndex)).sum
+    var i = firstStartIndex
+    includedPartitions.drop(1).foreach {
 
 Review comment:
   nit: 
   ```
   includedPartitions.drop(1).foreach { nextPartitionIndex =>
     ...
   }
   ```
   to reduce one level of indentation

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org


Mime
View raw message