spark-reviews mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From andrewor14 <...@git.apache.org>
Subject [GitHub] spark pull request: [SPARK-1953][YARN] YARN client mode Application...
Date Thu, 18 Dec 2014 19:36:18 GMT
Github user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/3607#discussion_r22065439
  
    --- Diff: yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala ---
    @@ -39,23 +39,37 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
       var appName: String = "Spark"
       var priority = 0
     
    -  // Additional memory to allocate to containers
    -  // For now, use driver's memory overhead as our AM container's memory overhead
    -  val amMemoryOverhead = sparkConf.getInt("spark.yarn.driver.memoryOverhead",
    -    math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toInt, MEMORY_OVERHEAD_MIN))
    -
    -  val executorMemoryOverhead = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
    -    math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN))
    -
       private val isDynamicAllocationEnabled =
         sparkConf.getBoolean("spark.dynamicAllocation.enabled", false)
     
       parseArgs(args.toList)
    +
    +  val isClusterMode = userClass != null
    +
       loadEnvironmentArgs()
       validateArgs()
     
    +  // Additional memory to allocate to containers. In different modes, we use different configs.
    +  val amMemoryOverheadConf = if (isClusterMode) {
    +    "spark.yarn.driver.memoryOverhead"
    +  } else {
    +    "spark.yarn.am.memoryOverhead"
    +  }
    +  val amMemoryOverhead = sparkConf.getInt(amMemoryOverheadConf,
    +    math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toInt, MEMORY_OVERHEAD_MIN))
    +
    +  val executorMemoryOverhead = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
    +    math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN))
    +
       /** Load any default arguments provided through environment variables and Spark properties. */
       private def loadEnvironmentArgs(): Unit = {
    +    // In cluster mode, the driver and the AM live in the same JVM, so this does not apply
    +    if (!isClusterMode) {
    +      amMemory = Utils.memoryStringToMb(sparkConf.get("spark.yarn.am.memory", "512m"))
    +    } else {
    +      println("spark.yarn.am.memory is set but does not apply in cluster mode, " +
    --- End diff --
    
    We should also warn against `spark.yarn.driver.memoryOverhead` being set in client mode, since it won't have any effect.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org


Mime
View raw message