spark-reviews mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sryza <...@git.apache.org>
Subject [GitHub] spark pull request: [SPARK-5529][CORE]Add expireDeadHosts in Heart...
Date Fri, 06 Feb 2015 09:08:12 GMT
Github user sryza commented on a diff in the pull request:

    https://github.com/apache/spark/pull/4363#discussion_r24229250
  
    --- Diff: core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala ---
    @@ -17,33 +17,86 @@
     
     package org.apache.spark
     
    -import akka.actor.Actor
    +import scala.concurrent.duration._
    +import scala.collection.mutable
    +
    +import akka.actor.{Actor, Cancellable}
    +
     import org.apache.spark.executor.TaskMetrics
     import org.apache.spark.storage.BlockManagerId
    -import org.apache.spark.scheduler.TaskScheduler
    +import org.apache.spark.scheduler.{SlaveLost, TaskScheduler}
     import org.apache.spark.util.ActorLogReceive
     
     /**
      * A heartbeat from executors to the driver. This is a shared message used by several
internal
    - * components to convey liveness or execution information for in-progress tasks.
    + * components to convey liveness or execution information for in-progress tasks. It will
also 
    + * expire the hosts that have not heartbeated for more than spark.driver.executorTimeoutMs.
      */
     private[spark] case class Heartbeat(
         executorId: String,
         taskMetrics: Array[(Long, TaskMetrics)], // taskId -> TaskMetrics
         blockManagerId: BlockManagerId)
     
    +private[spark] case object ExpireDeadHosts 
    +    
     private[spark] case class HeartbeatResponse(reregisterBlockManager: Boolean)
     
     /**
      * Lives in the driver to receive heartbeats from executors.
      */
    -private[spark] class HeartbeatReceiver(scheduler: TaskScheduler)
    +private[spark] class HeartbeatReceiver(sc: SparkContext, scheduler: TaskScheduler)
       extends Actor with ActorLogReceive with Logging {
     
    +  val executorLastSeen = new mutable.HashMap[String, Long]
    +  
    +  val executorTimeout = sc.conf.getLong("spark.driver.executorTimeoutMs", 
    +    sc.conf.getLong("spark.storage.blockManagerSlaveTimeoutMs", 120 * 1000))
    +  
    +  val checkTimeoutInterval = sc.conf.getLong("spark.driver.executorTimeoutIntervalMs",
    +    sc.conf.getLong("spark.storage.blockManagerTimeoutIntervalMs", 60000))
    +  
    +  var timeoutCheckingTask: Cancellable = null
    +  
    +  override def preStart(): Unit = {
    +    import context.dispatcher
    +    timeoutCheckingTask = context.system.scheduler.schedule(0.seconds,
    +      checkTimeoutInterval.milliseconds, self, ExpireDeadHosts)
    +    super.preStart
    +  }
    +  
       override def receiveWithLogging = {
         case Heartbeat(executorId, taskMetrics, blockManagerId) =>
           val response = HeartbeatResponse(
             !scheduler.executorHeartbeatReceived(executorId, taskMetrics, blockManagerId))
    +      heartbeatReceived(executorId)
           sender ! response
    +    case ExpireDeadHosts =>
    +      expireDeadHosts()
    +  }
    +  
    +  private def heartbeatReceived(executorId: String): Unit = {
    +    executorLastSeen(executorId) = System.currentTimeMillis()
    +  }
    +  
    +  private def expireDeadHosts(): Unit = {
    +    logTrace("Checking for hosts with no recent heart beats in HeartbeatReceiver.")
    --- End diff --
    
    nit: remove space between "heart" and "beats"


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org


Mime
View raw message