spark-reviews mailing list archives

From vanzin <...@git.apache.org>
Subject [GitHub] spark pull request #19041: [SPARK-21097][CORE] Add option to recover cached ...
Date Tue, 10 Oct 2017 18:54:34 GMT
Github user vanzin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19041#discussion_r143802830
  
    --- Diff: core/src/main/scala/org/apache/spark/CacheRecoveryManager.scala ---
    @@ -0,0 +1,250 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark
    +
    +import java.util.concurrent.{ScheduledFuture, TimeUnit}
    +
    +import scala.collection.mutable
    +import scala.concurrent.{ExecutionContext, Future}
    +import scala.util.Failure
    +
    +import org.apache.spark.internal.Logging
    +import org.apache.spark.internal.config.DYN_ALLOCATION_RECOVER_CACHE_TIMEOUT
    +import org.apache.spark.rpc.RpcEndpointRef
    +import org.apache.spark.storage.{BlockId, BlockManagerId, RDDBlockId}
    +import org.apache.spark.storage.BlockManagerMessages.{GetCachedBlocks, GetMemoryStatus,
    +  GetSizeOfBlocks, ReplicateOneBlock}
    +import org.apache.spark.util.ThreadUtils
    +
    +/**
    + * Responsible for asynchronously replicating all of an executor's cached blocks, and then
    + * shutting it down.
    + */
    +final private class CacheRecoveryManager(
    +    state: CacheRecoveryManagerState,
    +    conf: SparkConf)
    +  extends Logging {
    +
    +  private val threadPool = ThreadUtils.newDaemonCachedThreadPool("recover-cache-shutdown-pool")
    +  private implicit val asyncExecutionContext: ExecutionContext =
    +    ExecutionContext.fromExecutorService(threadPool)
    +
    +  /**
    +   * Start the recover cache shutdown process for these executors
    +   *
    +   * @param execIds the executors to start shutting down
    +   */
    +  def startExecutorKill(execIds: Seq[String]): Unit = {
    +    logDebug(s"Recover cached data before shutting down executors ${execIds.mkString(",
")}.")
    +    checkForReplicableBlocks(execIds)
    +  }
    +
    +  /**
    +   * Stops all thread pools.
    +   *
    +   * @return the tasks that were awaiting execution when the pools were shut down
    +   */
    +  def stop(): java.util.List[Runnable] = {
    +    threadPool.shutdownNow()
    +    state.stop()
    +  }
    +
    +  /**
    +   * Get list of cached blocks from BlockManagerMaster. If there are cached blocks, replicate them,
    +   * otherwise kill the executors
    +   *
    +   * @param execIds the executors to check
    +   */
    +  private def checkForReplicableBlocks(execIds: Seq[String]) = state.getBlocks(execIds).foreach {
    +    case (executorId, HasCachedBlocks) => replicateBlocks(executorId)
    +    case (executorId, NoMoreBlocks | NotEnoughMemory) => state.killExecutor(executorId)
    +  }
    +
    +  /**
    +   * Replicate one cached block on an executor. If there are more, repeat. If there are none,
    +   * check with the block manager master again. If there is an error, kill the executor.
    +   *
    +   * @param execId the executor to save a block on
    +   */
    +  private def replicateBlocks(execId: String): Unit = {
    +    import scala.util.Success
    +    val (response, blockId) = state.replicateFirstBlock(execId)
    +    response.onComplete {
    +      case Success(true) =>
    +        logTrace(s"Finished replicating block ${blockId.getOrElse("unknown")} on exec $execId.")
    +        replicateBlocks(execId)
    +      case Success(false) =>
    +        checkForReplicableBlocks(Seq(execId))
    +      case Failure(f) =>
    +        logWarning(s"Error trying to replicate block ${blockId.getOrElse("unknown")}.", f)
    +        state.killExecutor(execId)
    +    }
    +  }
    +}
    +
    +private object CacheRecoveryManager {
    +  def apply(eam: ExecutorAllocationManager, conf: SparkConf): CacheRecoveryManager = {
    +    val bmme = SparkEnv.get.blockManager.master.driverEndpoint
    +    val state = new CacheRecoveryManagerState(bmme, eam, conf)
    +    new CacheRecoveryManager(state, conf)
    +  }
    +}
    +
    +/**
    + * Private class that holds state for all the executors being shut down.
    + *
    + * @param blockManagerMasterEndpoint the BlockManagerMaster RPC endpoint
    + * @param executorAllocationManager the ExecutorAllocationManager
    + * @param conf the Spark configuration
    + */
    +final private class CacheRecoveryManagerState(
    +   blockManagerMasterEndpoint: RpcEndpointRef,
    +   executorAllocationManager: ExecutorAllocationManager,
    +   conf: SparkConf
    + ) extends Logging {
    +
    +  private val forceKillAfterS = conf.get(DYN_ALLOCATION_RECOVER_CACHE_TIMEOUT)
    +  private val scheduler =
    +    ThreadUtils.newDaemonSingleThreadScheduledExecutor("recover-cache-shutdown-timers")
    --- End diff --
    
    s/recover-cache/cache-recovery
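    
    Applied consistently, that substitution would touch both pool names in this
    file, not just the scheduler here. A sketch of the result (assuming no other
    call sites reference these thread name prefixes):
    
        // hypothetical result of applying s/recover-cache/cache-recovery/ to this file
        private val threadPool =
          ThreadUtils.newDaemonCachedThreadPool("cache-recovery-shutdown-pool")
    
        private val scheduler =
          ThreadUtils.newDaemonSingleThreadScheduledExecutor("cache-recovery-shutdown-timers")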


---
