spark-reviews mailing list archives

From jiangxb1987 <...@git.apache.org>
Subject [GitHub] spark pull request #18487: [SPARK-21243][Core] Limit no. of map outputs in a...
Date Thu, 06 Jul 2017 14:47:14 GMT
Github user jiangxb1987 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/18487#discussion_r125916755
  
    --- Diff: core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala ---
    @@ -433,12 +449,57 @@ final class ShuffleBlockFetcherIterator(
       }
     
       private def fetchUpToMaxBytes(): Unit = {
    -    // Send fetch requests up to maxBytesInFlight
    -    while (fetchRequests.nonEmpty &&
    -      (bytesInFlight == 0 ||
    -        (reqsInFlight + 1 <= maxReqsInFlight &&
    -          bytesInFlight + fetchRequests.front.size <= maxBytesInFlight))) {
    -      sendRequest(fetchRequests.dequeue())
    +    // Send fetch requests up to maxBytesInFlight. If you cannot fetch from a remote host
    +    // immediately, defer the request until the next time it can be processed.
    +
    +    // Process any outstanding deferred fetch requests if possible.
    +    if (deferredFetchRequests.nonEmpty) {
    +      for ((remoteAddress, defReqQueue) <- deferredFetchRequests) {
    +        while (isRemoteBlockFetchable(defReqQueue) &&
    +            !isRemoteAddressMaxedOut(remoteAddress, defReqQueue.front)) {
    +          val request = defReqQueue.dequeue()
    +          logDebug(s"Processing deferred fetch request for $remoteAddress with "
    +            + s"${request.blocks.length} blocks")
    +          send(remoteAddress, request)
    +          if (defReqQueue.isEmpty) {
    +            deferredFetchRequests -= remoteAddress
    +          }
    +        }
    +      }
    +    }
    +
    +    // Process any regular fetch requests if possible.
    +    while (isRemoteBlockFetchable(fetchRequests)) {
    +      val request = fetchRequests.dequeue()
    +      val remoteAddress = request.address
    +      if (isRemoteAddressMaxedOut(remoteAddress, request)) {
    +        logDebug(s"Deferring fetch request for $remoteAddress with ${request.blocks.size} blocks")
    +        val defReqQueue = deferredFetchRequests.getOrElse(remoteAddress, new Queue[FetchRequest]())
    +        defReqQueue.enqueue(request)
    +        deferredFetchRequests(remoteAddress) = defReqQueue
    +      } else {
    +        send(remoteAddress, request)
    +      }
    +    }
    +
    +    def send(remoteAddress: BlockManagerId, request: FetchRequest): Unit = {
    +      sendRequest(request)
    +      numBlocksInFlightPerAddress(remoteAddress) =
    +        numBlocksInFlightPerAddress.getOrElse(remoteAddress, 0) + request.blocks.size
    +    }
    +
    +    def isRemoteBlockFetchable(fetchReqQueue: Queue[FetchRequest]): Boolean = {
    +      fetchReqQueue.nonEmpty &&
    +        (bytesInFlight == 0 ||
    +          (reqsInFlight + 1 <= maxReqsInFlight &&
    +            bytesInFlight + fetchReqQueue.front.size <= maxBytesInFlight))
    +    }
    +
    +    // Checks if sending a new fetch request will exceed the max no. of blocks being fetched from a
    +    // given remote address.
    +    def isRemoteAddressMaxedOut(remoteHost: BlockManagerId, request: FetchRequest): Boolean = {
    --- End diff --
    
    Is this `remoteHost` or `remoteAddress`? The parameter is declared `remoteHost`, but the comment above, the method name, and both call sites use `remoteAddress`; the naming should be consistent.
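
    For anyone skimming the archive, here is a minimal, self-contained sketch of the
    per-address deferral pattern this hunk introduces. It uses simplified stand-in types
    (String addresses, plain Ints) rather than Spark's BlockManagerId and FetchRequest,
    and the names Fetcher, Request, and maxBlocksPerAddress are illustrative, not the
    actual Spark classes or config keys:

    // Sketch only: simplified stand-ins for the structures in the diff above.
    import scala.collection.mutable

    case class Request(address: String, blocks: Int, size: Long)

    class Fetcher(maxBytesInFlight: Long, maxBlocksPerAddress: Int) {
      private var bytesInFlight = 0L
      private val blocksInFlight = mutable.Map[String, Int]().withDefaultValue(0)
      private val pending = mutable.Queue[Request]()
      private val deferred = mutable.Map[String, mutable.Queue[Request]]()

      def enqueue(r: Request): Unit = pending.enqueue(r)

      // Loosely mirrors fetchUpToMaxBytes(): drain deferred queues first, then the
      // main queue, deferring any request whose target address is already maxed out.
      def fetchUpToMax(): Unit = {
        for ((addr, q) <- deferred.toSeq) {
          while (q.nonEmpty && fetchable(q.front) && !maxedOut(q.front)) send(q.dequeue())
          if (q.isEmpty) deferred -= addr
        }
        while (pending.nonEmpty && fetchable(pending.front)) {
          val r = pending.dequeue()
          if (maxedOut(r)) deferred.getOrElseUpdate(r.address, mutable.Queue.empty).enqueue(r)
          else send(r)
        }
      }

      // Analogue of isRemoteBlockFetchable: respect the global bytes-in-flight cap.
      private def fetchable(r: Request): Boolean =
        bytesInFlight == 0 || bytesInFlight + r.size <= maxBytesInFlight

      // Analogue of isRemoteAddressMaxedOut: cap blocks in flight per address.
      private def maxedOut(r: Request): Boolean =
        blocksInFlight(r.address) + r.blocks > maxBlocksPerAddress

      private def send(r: Request): Unit = {
        bytesInFlight += r.size
        blocksInFlight(r.address) += r.blocks
        println(s"fetching ${r.blocks} blocks (${r.size} B) from ${r.address}")
      }
    }

    With this shape, a request for an already-saturated address is parked in a queue
    keyed by that address and retried on the next fetchUpToMax() call, rather than
    blocking fetches from other addresses behind it.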

