ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From smoha...@apache.org
Subject ambari git commit: AMBARI-19338. AM sizing for LLAP - min container size changes (smohanty)
Date Fri, 13 Jan 2017 23:31:17 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk 89452768c -> 86ed53e74


AMBARI-19338. AM sizing for LLAP - min container size changes (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/86ed53e7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/86ed53e7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/86ed53e7

Branch: refs/heads/trunk
Commit: 86ed53e7427c58578bcbb2b894aa68d2f8c46698
Parents: 8945276
Author: Sumit Mohanty <smohanty@hortonworks.com>
Authored: Thu Jan 12 15:16:33 2017 -0800
Committer: Sumit Mohanty <smohanty@hortonworks.com>
Committed: Fri Jan 13 15:28:15 2017 -0800

----------------------------------------------------------------------
 .../src/main/resources/scripts/stack_advisor.py |   5 +
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  68 +++-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  17 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   | 383 ++++++++++++++++++-
 .../stacks/2.2/common/test_stack_advisor.py     |  74 ++--
 .../stacks/2.3/common/test_stack_advisor.py     |   3 +
 .../stacks/2.5/common/test_stack_advisor.py     |  60 ++-
 7 files changed, 529 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index 1d9b4a2..50f2b59 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -100,18 +100,23 @@ def main(argv=None):
   hosts = stackAdvisor.filterHostMounts(hosts, services)
 
   if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
+    services['context'] = {'call_type': 'recommendComponentLayout'}
     result = stackAdvisor.recommendComponentLayout(services, hosts)
     result_file = os.path.join(actionDir, "component-layout.json")
   elif action == VALIDATE_COMPONENT_LAYOUT_ACTION:
+    services['context'] = {'call_type': 'validateComponentLayout'}
     result = stackAdvisor.validateComponentLayout(services, hosts)
     result_file = os.path.join(actionDir, "component-layout-validation.json")
   elif action == RECOMMEND_CONFIGURATIONS:
+    services['context'] = {'call_type': 'recommendConfigurations'}
     result = stackAdvisor.recommendConfigurations(services, hosts)
     result_file = os.path.join(actionDir, "configurations.json")
   elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
+    services['context'] = {'call_type': 'recommendConfigurationDependencies'}
     result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
     result_file = os.path.join(actionDir, "configurations.json")
   else:  # action == VALIDATE_CONFIGURATIONS
+    services['context'] = {'call_type': 'validateConfigurations'}
     result = stackAdvisor.validateConfigurations(services, hosts)
     result_file = os.path.join(actionDir, "configurations-validation.json")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index bcd61b5..9816702 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -173,7 +173,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     if "referenceNodeManagerHost" in clusterData:
      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
-    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
+    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['minContainerRam']))
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
     putYarnEnvProperty('min_user_id', self.get_system_min_uid())
 
@@ -1108,7 +1108,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
 
     cluster["minContainerSize"] = {
-      cluster["ram"] <= 4: 256,
+      cluster["ram"] <= 3: 128,
+      3 < cluster["ram"] <= 4: 256,
       4 < cluster["ram"] <= 8: 512,
       8 < cluster["ram"] <= 24: 1024,
       24 < cluster["ram"]: 2048
@@ -1118,22 +1119,64 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     if cluster["hBaseInstalled"]:
       totalAvailableRam -= cluster["hbaseRam"]
     cluster["totalAvailableRam"] = max(512, totalAvailableRam * 1024)
+    Logger.info("Memory for YARN apps - cluster[totalAvailableRam]: " + str(cluster["totalAvailableRam"]))
+
+    suggestedMinContainerRam = 1024
+    callContext = getCallContext(services)
+
+    if services:  # its never None but some unit tests pass it as None
+      if None != getOldValue(self, services, "yarn-site", "yarn.scheduler.minimum-allocation-mb") or \
+              'recommendConfigurations' != callContext:
+        '''yarn.scheduler.minimum-allocation-mb has changed - then pick this value up'''
+        if "yarn-site" in services["configurations"] and \
+                "yarn.scheduler.minimum-allocation-mb" in services["configurations"]["yarn-site"]["properties"] and \
+                str(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]).isdigit():
+          Logger.info("Using user provided yarn.scheduler.minimum-allocation-mb = " +
+                      str(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+          cluster["minContainerRam"] = int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+          Logger.info("Minimum ram per container due to user input - cluster[minContainerRam]: " + str(cluster["minContainerRam"]))
+          if cluster["minContainerRam"] > cluster["totalAvailableRam"]:
+            cluster["minContainerRam"] = cluster["totalAvailableRam"]
+            Logger.info("Minimum ram per container after checking against limit - cluster[minContainerRam]: " + str(cluster["minContainerRam"]))
+            pass
+          cluster["minContainerSize"] = cluster["minContainerRam"]
+          suggestedMinContainerRam = cluster["minContainerRam"]
+          pass
+        pass
+      pass
+
+
     '''containers = max(3, min (2*cores,min (1.8*DISKS,(Total available RAM) / MIN_CONTAINER_SIZE))))'''
-    cluster["containers"] = round(max(3,
+    cluster["containers"] = int(round(max(3,
                                 min(2 * cluster["cpu"],
                                     min(ceil(1.8 * cluster["disk"]),
-                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
+                                            cluster["totalAvailableRam"] / cluster["minContainerSize"])))))
+    Logger.info("Containers per node - cluster[containers]: " + str(cluster["containers"]))
+
+    if cluster["containers"] * cluster["minContainerSize"] > cluster["totalAvailableRam"]:
+      cluster["containers"] = ceil(cluster["totalAvailableRam"] / cluster["minContainerSize"])
+      Logger.info("Modified number of containers based on provided value for yarn.scheduler.minimum-allocation-mb")
+      pass
+
+    cluster["ramPerContainer"] = int(abs(cluster["totalAvailableRam"] / cluster["containers"]))
+    cluster["minContainerRam"] = min(suggestedMinContainerRam, cluster["ramPerContainer"])
+    Logger.info("Ram per containers before normalization - cluster[ramPerContainer]: " + str(cluster["ramPerContainer"]))
+
+    '''If greater than cluster["minContainerRam"], value will be in multiples of cluster["minContainerRam"]'''
+    if cluster["ramPerContainer"] > cluster["minContainerRam"]:
+      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / cluster["minContainerRam"]) * cluster["minContainerRam"]
 
-    '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
-    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
-    '''If greater than 1GB, value will be in multiples of 512.'''
-    if cluster["ramPerContainer"] > 1024:
-      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
 
     cluster["mapMemory"] = int(cluster["ramPerContainer"])
     cluster["reduceMemory"] = cluster["ramPerContainer"]
     cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
 
+    Logger.info("Min container size - cluster[minContainerRam]: " + str(cluster["minContainerRam"]))
+    Logger.info("Available memory for map - cluster[mapMemory]: " + str(cluster["mapMemory"]))
+    Logger.info("Available memory for reduce - cluster[reduceMemory]: " + str(cluster["reduceMemory"]))
+    Logger.info("Available memory for am - cluster[amMemory]: " + str(cluster["amMemory"]))
+
+
     return cluster
 
   def getServiceConfigurationValidators(self):
@@ -2027,6 +2070,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     service_meta = service_meta[0]
     return [item[__stack_service_components]["component_name"] for item in service_meta["components"]]
 
+def getCallContext(services):
+  if services:
+    if 'context' in services:
+      Logger.info("context : " + str (services['context']))
+      return services['context']['call_type']
+  return ""
+
 
 def getOldValue(self, services, configType, propertyName):
   if services:

http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index b601179..8980398 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -1147,7 +1147,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     nodemanagerMinRam = 1048576 # 1TB in mb
     if "referenceNodeManagerHost" in clusterData:
       nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
-    putMapredProperty('yarn.app.mapreduce.am.resource.mb', configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+    putMapredProperty('yarn.app.mapreduce.am.resource.mb', max(int(clusterData['ramPerContainer']),int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])))
     putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(0.8 * int(configurations["mapred-site"]["properties"]["yarn.app.mapreduce.am.resource.mb"]))) + "m" + " -Dhdp.version=${hdp.version}")
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     min_mapreduce_map_memory_mb = 0
@@ -1157,8 +1157,19 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       min_mapreduce_map_memory_mb = 1536
       min_mapreduce_reduce_memory_mb = 1536
       min_mapreduce_map_java_opts = 1024
-    putMapredProperty('mapreduce.map.memory.mb', min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), max(min_mapreduce_map_memory_mb, int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))))
-    putMapredProperty('mapreduce.reduce.memory.mb', min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), max(min_mapreduce_reduce_memory_mb, min(2*int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(nodemanagerMinRam)))))
+
+    putMapredProperty('mapreduce.map.memory.mb',
+                      min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]),
+                          max(min_mapreduce_map_memory_mb,
+                              max(int(clusterData['ramPerContainer']),
+                                  int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])))))
+    putMapredProperty('mapreduce.reduce.memory.mb',
+                      min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]),
+                          max(max(min_mapreduce_reduce_memory_mb,
+                                  int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])),
+                              min(2*int(clusterData['ramPerContainer']),
+                                  int(nodemanagerMinRam)))))
+
     mapredMapXmx = int(0.8*int(configurations["mapred-site"]["properties"]["mapreduce.map.memory.mb"]));
     putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(max(min_mapreduce_map_java_opts, mapredMapXmx)) + "m")
     putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(0.8*int(configurations["mapred-site"]["properties"]["mapreduce.reduce.memory.mb"]))) + "m")

http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 4a936c8..1145154 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -258,7 +258,7 @@ class TestHDP206StackAdvisor(TestCase):
     result = self.stackAdvisor.validateConfigurations(services, hosts)
 
     expectedItems = [
-      {"message": "Value is less than the recommended default of 512", "level": "WARN"},
+      {"message": "Value is less than the recommended default of 510", "level": "WARN"},
       {'message': 'Value should be set for yarn.nodemanager.linux-container-executor.group', 'level': 'ERROR'},
       {"message": "Value should be integer", "level": "ERROR"},
       {"message": "Value should be set", "level": "ERROR"}
@@ -387,6 +387,348 @@ class TestHDP206StackAdvisor(TestCase):
     ]
     self.assertValidationResult(expectedItems, result)
 
+  def test__getConfigurationClusterSummary_withContext1(self):
+    servicesList = ["HDFS", "YARN"]
+    components = []
+    hosts = {
+      "items" : [
+        {
+          "Hosts" : {
+            "host_name" : "host1",
+            "cpu_count" : 2,
+            "total_mem" : 20471556,
+            "disk_info" : [
+              {
+                "available" : "21052800",
+                "device" : "/dev/vda1",
+                "used" : "3303636",
+                "percent" : "14%",
+                "size" : "25666616",
+                "type" : "ext4",
+                "mountpoint" : "/"
+              },
+              {
+                "available" : "244732200",
+                "device" : "/dev/vdb",
+                "used" : "60508",
+                "percent" : "1%",
+                "size" : "257899908",
+                "type" : "ext4",
+                "mountpoint" : "/grid/0"
+              }
+            ]
+          }
+        }
+      ]
+    }
+
+    services = {"services":
+                  [{"StackServices":
+                      {"service_name": "YARN",
+                       "service_version": "2.6.0.2.2"
+                      },
+                    "components": [
+                      {
+                        "StackServiceComponents": {
+                          "advertise_version": "true",
+                          "cardinality": "1+",
+                          "component_category": "SLAVE",
+                          "component_name": "NODEMANAGER",
+                          "custom_commands": [
+
+                          ],
+                          "display_name": "NodeManager",
+                          "is_client": "false",
+                          "is_master": "false",
+                          "service_name": "YARN",
+                          "stack_name": "HDP",
+                          "stack_version": "2.2",
+                          "hostnames": [
+                            "host1"
+                          ]
+                        },
+                        "dependencies": [
+                        ]
+                      }
+                    ],
+                    }],
+                "configurations": {
+                  "yarn-site" : {
+                    "properties" : {
+                      "yarn.scheduler.minimum-allocation-mb" : "1024",
+                      "yarn.scheduler.maximum-allocation-mb" : "2048"
+                    }
+                  }
+                },
+                "changed-configurations" : [ ]
+    }
+
+    expected = {
+      "hBaseInstalled": False,
+      "components": components,
+      "cpu": 2,
+      "disk": 2,
+      "ram": 19,
+      "reservedRam": 4,
+      "hbaseRam": 4,
+      "minContainerSize": 1024,
+      "minContainerRam" : 1024,
+      "totalAvailableRam": 15360,
+      "containers": 4,
+      "ramPerContainer": 3072,
+      "mapMemory": 3072,
+      "reduceMemory": 3072,
+      "amMemory": 3072,
+      "referenceHost": hosts["items"][0]["Hosts"],
+      "referenceNodeManagerHost": hosts["items"][0]["Hosts"]
+    }
+
+    # Cluster create call
+    result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
+    self.assertEquals(result, expected)
+
+    # Validate configuration call - pick user specified value which is same as what was set
+    services = {"services":
+                  [{"StackServices":
+                      {"service_name": "YARN",
+                       "service_version": "2.6.0.2.2"
+                      },
+                    "components": [
+                      {
+                        "StackServiceComponents": {
+                          "advertise_version": "true",
+                          "cardinality": "1+",
+                          "component_category": "SLAVE",
+                          "component_name": "NODEMANAGER",
+                          "custom_commands": [
+
+                          ],
+                          "display_name": "NodeManager",
+                          "is_client": "false",
+                          "is_master": "false",
+                          "service_name": "YARN",
+                          "stack_name": "HDP",
+                          "stack_version": "2.2",
+                          "hostnames": [
+                            "host1"
+                          ]
+                        },
+                        "dependencies": [
+                        ]
+                      }
+                    ],
+                   }],
+                "configurations": {
+                  "yarn-site": {
+                    "properties": {
+                      "yarn.scheduler.minimum-allocation-mb": "1024",
+                      "yarn.scheduler.maximum-allocation-mb": "2048"
+                    }
+                  }
+                },
+                "changed-configurations": [],
+                "context": {'call_type': 'validateConfigurations'}
+    }
+    result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
+    self.assertEquals(result, expected)
+
+    # Validate configuration call - pick user specified value
+    services = {"services":
+                  [{"StackServices":
+                      {"service_name": "YARN",
+                       "service_version": "2.6.0.2.2"
+                      },
+                    "components": [
+                      {
+                        "StackServiceComponents": {
+                          "advertise_version": "true",
+                          "cardinality": "1+",
+                          "component_category": "SLAVE",
+                          "component_name": "NODEMANAGER",
+                          "custom_commands": [
+
+                          ],
+                          "display_name": "NodeManager",
+                          "is_client": "false",
+                          "is_master": "false",
+                          "service_name": "YARN",
+                          "stack_name": "HDP",
+                          "stack_version": "2.2",
+                          "hostnames": [
+                            "host1"
+                          ]
+                        },
+                        "dependencies": [
+                        ]
+                      }
+                    ],
+                    }],
+                "configurations": {
+                  "yarn-site": {
+                    "properties": {
+                      "yarn.scheduler.minimum-allocation-mb": "2048",
+                      "yarn.scheduler.maximum-allocation-mb": "12288"
+                    }
+                  }
+                },
+                "changed-configurations": [],
+                "context": {'call_type': 'validateConfigurations'}
+    }
+
+    expected_2048 = {
+      "hBaseInstalled": False,
+      "components": components,
+      "cpu": 2,
+      "disk": 2,
+      "ram": 19,
+      "reservedRam": 4,
+      "hbaseRam": 4,
+      "minContainerSize": 2048,
+      "minContainerRam" : 2048,
+      "totalAvailableRam": 15360,
+      "containers": 4,
+      "ramPerContainer": 2048,
+      "mapMemory": 2048,
+      "reduceMemory": 2048,
+      "amMemory": 2048,
+      "referenceHost": hosts["items"][0]["Hosts"],
+      "referenceNodeManagerHost": hosts["items"][0]["Hosts"]
+    }
+    result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
+    self.assertEquals(result, expected_2048)
+
+    # Recommend config dependencies call - pick user specified value
+    services = {"services":
+                  [{"StackServices":
+                      {"service_name": "YARN",
+                       "service_version": "2.6.0.2.2"
+                      },
+                    "components": [
+                      {
+                        "StackServiceComponents": {
+                          "advertise_version": "true",
+                          "cardinality": "1+",
+                          "component_category": "SLAVE",
+                          "component_name": "NODEMANAGER",
+                          "custom_commands": [
+
+                          ],
+                          "display_name": "NodeManager",
+                          "is_client": "false",
+                          "is_master": "false",
+                          "service_name": "YARN",
+                          "stack_name": "HDP",
+                          "stack_version": "2.2",
+                          "hostnames": [
+                            "host1"
+                          ]
+                        },
+                        "dependencies": [
+                        ]
+                      }
+                    ],
+                    }],
+                "configurations": {
+                  "yarn-site": {
+                    "properties": {
+                      "yarn.scheduler.minimum-allocation-mb": "2048",
+                      "yarn.scheduler.maximum-allocation-mb": "12288"
+                    }
+                  }
+                },
+                "changed-configurations": [],
+                "context": {'call_type': 'recommendConfigurationDependencies'}
+    }
+
+    expected_2048 = {
+      "hBaseInstalled": False,
+      "components": components,
+      "cpu": 2,
+      "disk": 2,
+      "ram": 19,
+      "reservedRam": 4,
+      "hbaseRam": 4,
+      "minContainerSize": 2048,
+      "minContainerRam" : 2048,
+      "totalAvailableRam": 15360,
+      "containers": 4,
+      "ramPerContainer": 2048,
+      "mapMemory": 2048,
+      "reduceMemory": 2048,
+      "amMemory": 2048,
+      "referenceHost": hosts["items"][0]["Hosts"],
+      "referenceNodeManagerHost": hosts["items"][0]["Hosts"]
+    }
+    result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
+    self.assertEquals(result, expected_2048)
+
+    # Recommend config dependencies call - pick user specified value of 4096 for min
+    services = {"services":
+                  [{"StackServices":
+                      {"service_name": "YARN",
+                       "service_version": "2.6.0.2.2"
+                      },
+                    "components": [
+                      {
+                        "StackServiceComponents": {
+                          "advertise_version": "true",
+                          "cardinality": "1+",
+                          "component_category": "SLAVE",
+                          "component_name": "NODEMANAGER",
+                          "custom_commands": [
+
+                          ],
+                          "display_name": "NodeManager",
+                          "is_client": "false",
+                          "is_master": "false",
+                          "service_name": "YARN",
+                          "stack_name": "HDP",
+                          "stack_version": "2.2",
+                          "hostnames": [
+                            "host1"
+                          ]
+                        },
+                        "dependencies": [
+                        ]
+                      }
+                    ],
+                    }],
+                "configurations": {
+                  "yarn-site": {
+                    "properties": {
+                      "yarn.scheduler.minimum-allocation-mb": "4096",
+                      "yarn.scheduler.maximum-allocation-mb": "12288"
+                    }
+                  }
+                },
+                "changed-configurations": [],
+                "context": {'call_type': 'recommendConfigurationDependencies'}
+    }
+
+    expected_4096 = {
+      "hBaseInstalled": False,
+      "components": components,
+      "cpu": 2,
+      "disk": 2,
+      "ram": 19,
+      "reservedRam": 4,
+      "hbaseRam": 4,
+      "minContainerSize": 4096,
+      "minContainerRam" : 4096,
+      "totalAvailableRam": 15360,
+      "containers": 3,
+      "ramPerContainer": 4096,
+      "mapMemory": 4096,
+      "reduceMemory": 4096,
+      "amMemory": 4096,
+      "referenceHost": hosts["items"][0]["Hosts"],
+      "referenceNodeManagerHost": hosts["items"][0]["Hosts"]
+    }
+    result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
+    self.assertEquals(result, expected_4096)
+
+
+
   def test_getConfigurationClusterSummary_withHBaseAnd6gbRam(self):
     servicesList = ["HBASE"]
     components = []
@@ -419,6 +761,7 @@ class TestHDP206StackAdvisor(TestCase):
       "reservedRam": 2,
       "hbaseRam": 1,
       "minContainerSize": 512,
+      "minContainerRam" : 512,
       "totalAvailableRam": 3072,
       "containers": 6,
       "ramPerContainer": 512,
@@ -487,15 +830,16 @@ class TestHDP206StackAdvisor(TestCase):
         })
     expected["referenceHost"] = hosts["items"][1]["Hosts"]
     expected["referenceNodeManagerHost"] = hosts["items"][1]["Hosts"]
-    expected["amMemory"] = 170.66666666666666
-    expected["containers"] = 3.0
+    expected["amMemory"] = 128
+    expected["containers"] = 4
     expected["cpu"] = 4
     expected["totalAvailableRam"] = 512
-    expected["mapMemory"] = 170
-    expected["minContainerSize"] = 256
-    expected["reduceMemory"] = 170.66666666666666
+    expected["mapMemory"] = 128
+    expected["minContainerSize"] = 128
+    expected["reduceMemory"] = 128
+    expected["minContainerRam"] = 128
     expected["ram"] = 0
-    expected["ramPerContainer"] = 170.66666666666666
+    expected["ramPerContainer"] = 128
     expected["reservedRam"] = 1
     result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
     self.assertEquals(result, expected)
@@ -537,6 +881,7 @@ class TestHDP206StackAdvisor(TestCase):
       "mapMemory": 3072,
       "reduceMemory": 3072,
       "amMemory": 3072,
+      "minContainerRam": 1024,
       "referenceHost": hosts["items"][0]["Hosts"]
     }
 
@@ -592,7 +937,8 @@ class TestHDP206StackAdvisor(TestCase):
     services = {"configurations": configurations, "services": []}
     clusterData = {
       "containers" : 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "yarn-env": {
@@ -619,7 +965,8 @@ class TestHDP206StackAdvisor(TestCase):
     clusterData = {
       "mapMemory": 567,
       "reduceMemory": 345.6666666666666,
-      "amMemory": 123.54
+      "amMemory": 123.54,
+      "minContainerRam": 123.54
     }
     expected = {
       "mapred-site": {
@@ -655,13 +1002,14 @@ class TestHDP206StackAdvisor(TestCase):
       "ram": 0,
       "reservedRam": 1,
       "hbaseRam": 1,
-      "minContainerSize": 256,
+      "minContainerSize": 128,
       "totalAvailableRam": 512,
       "containers": 3,
-      "ramPerContainer": 170.66666666666666,
+      "ramPerContainer": 170,
       "mapMemory": 170,
-      "reduceMemory": 170.66666666666666,
-      "amMemory": 170.66666666666666
+      "reduceMemory": 170,
+      "amMemory": 170,
+      "minContainerRam" : 170
     }
 
     self.assertEquals(result, expected)
@@ -1171,7 +1519,8 @@ class TestHDP206StackAdvisor(TestCase):
     }
 
     clusterData = {
-      "totalAvailableRam": 2048
+      "totalAvailableRam": 2048,
+      "minContainerRam": 256
     }
     ambariHostName = socket.getfqdn()
     expected = {'oozie-env':
@@ -1210,7 +1559,7 @@ class TestHDP206StackAdvisor(TestCase):
                       'namenode_heapsize': '1024',
                       'proxyuser_group': 'users',
                       'namenode_opt_maxnewsize': '256',
-                      'namenode_opt_newsize': '256'}}}
+                      'namenode_opt_newsize': '128'}}}
 
     # Apart from testing other HDFS recommendations, also tests 'hadoop.proxyuser.hive.hosts' config value which includes both HiveServer
     # and Hive Server Interactive Host (installed on different host compared to HiveServer).
@@ -1345,7 +1694,7 @@ class TestHDP206StackAdvisor(TestCase):
                       'namenode_heapsize': '1024',
                       'proxyuser_group': 'users',
                       'namenode_opt_maxnewsize': '256',
-                      'namenode_opt_newsize': '256'}}}
+                      'namenode_opt_newsize': '128'}}}
 
     # Apart from testing other HDFS recommendations, also tests 'hadoop.proxyuser.hive.hosts' config value which includes both HiveServer
     # and Hive Server Interactive Host (installed on same host compared to HiveServer).
@@ -1478,7 +1827,7 @@ class TestHDP206StackAdvisor(TestCase):
                       'namenode_heapsize': '1024',
                       'proxyuser_group': 'users',
                       'namenode_opt_maxnewsize': '256',
-                      'namenode_opt_newsize': '256'}}}
+                      'namenode_opt_newsize': '128'}}}
 
     self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services3,
hosts)
     self.assertEquals(configurations, expected)

http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index db71ec8..de8f2b0 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -86,10 +86,11 @@ class TestHDP22StackAdvisor(TestCase):
     }
     clusterData = {
       "mapMemory": 3000,
-      "amMemory": 2000,
+      "amMemory": 2048,
       "reduceMemory": 2056,
       "containers": 3,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "tez-site": {
@@ -183,6 +184,7 @@ class TestHDP22StackAdvisor(TestCase):
       "amMemory": 3100,
       "reduceMemory": 2056,
       "containers": 3,
+      "minContainerRam": 256,
       "ramPerContainer": 256
     }
     expected = {
@@ -265,10 +267,11 @@ class TestHDP22StackAdvisor(TestCase):
     }
     clusterData = {
       "mapMemory": 760,
-      "amMemory": 2000,
+      "amMemory": 2048,
       "reduceMemory": 760,
       "containers": 3,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "tez-site": {
@@ -884,7 +887,8 @@ class TestHDP22StackAdvisor(TestCase):
     clusterData = {
       "cpu": 4,
       "containers" : 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "yarn-env": {
@@ -923,7 +927,8 @@ class TestHDP22StackAdvisor(TestCase):
     clusterData = {
       "cpu": 4,
       "containers": 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "spark-defaults": {
@@ -955,7 +960,8 @@ class TestHDP22StackAdvisor(TestCase):
     clusterData = {
       "cpu": 4,
       "containers" : 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "yarn-env": {
@@ -1216,7 +1222,8 @@ class TestHDP22StackAdvisor(TestCase):
     clusterData = {
       "cpu": 4,
       "containers" : 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
 
     services = {
@@ -1263,7 +1270,8 @@ class TestHDP22StackAdvisor(TestCase):
       "amMemory": 2000,
       "reduceMemory": 2056,
       "containers": 3,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
 
     expected = {
@@ -1754,6 +1762,7 @@ class TestHDP22StackAdvisor(TestCase):
       "containers" : 7,
       "ramPerContainer": 256,
       "totalAvailableRam": 4096,
+      "minContainerRam": 256
     }
     expected = {
       "cluster-env": {
@@ -1772,9 +1781,9 @@ class TestHDP22StackAdvisor(TestCase):
           'mapreduce.job.queuename': 'default',
           "mapreduce.map.memory.mb": "1536",
           "mapreduce.reduce.memory.mb": "1536",
-          "yarn.app.mapreduce.am.command-opts": "-Xmx80m -Dhdp.version=${hdp.version}",
+          "yarn.app.mapreduce.am.command-opts": "-Xmx204m -Dhdp.version=${hdp.version}",
           "mapreduce.reduce.java.opts": "-Xmx1228m",
-          "yarn.app.mapreduce.am.resource.mb": "100",
+          "yarn.app.mapreduce.am.resource.mb": "256",
           "mapreduce.map.java.opts": "-Xmx1228m",
           "mapreduce.task.io.sort.mb": "859"
         },
@@ -2003,7 +2012,7 @@ class TestHDP22StackAdvisor(TestCase):
           "mapreduce.map.memory.mb": "1024",
           "mapreduce.reduce.memory.mb": "682",
           "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
-          "mapreduce.reduce.java.opts": "-Xmx546m",
+          "mapreduce.reduce.java.opts": "-Xmx560m",
           "yarn.app.mapreduce.am.resource.mb": "682",
           "mapreduce.map.java.opts": "-Xmx546m",
           "mapreduce.task.io.sort.mb": "273"
@@ -2021,7 +2030,8 @@ class TestHDP22StackAdvisor(TestCase):
     clusterData = {
       "cpu": 4,
       "containers" : 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "yarn-env": {
@@ -2033,13 +2043,13 @@ class TestHDP22StackAdvisor(TestCase):
       "mapred-site": {
         "properties": {
           'mapreduce.job.queuename': 'default',
-          "mapreduce.map.memory.mb": "100",
-          "mapreduce.reduce.memory.mb": "200",
-          "yarn.app.mapreduce.am.command-opts": "-Xmx80m -Dhdp.version=${hdp.version}",
-          "mapreduce.reduce.java.opts": "-Xmx160m",
-          "yarn.app.mapreduce.am.resource.mb": "100",
-          "mapreduce.map.java.opts": "-Xmx80m",
-          "mapreduce.task.io.sort.mb": "56"
+          "mapreduce.map.memory.mb": "256",
+          "mapreduce.reduce.memory.mb": "512",
+          "yarn.app.mapreduce.am.command-opts": "-Xmx204m -Dhdp.version=${hdp.version}",
+          "mapreduce.reduce.java.opts": "-Xmx409m",
+          "yarn.app.mapreduce.am.resource.mb": "256",
+          "mapreduce.map.java.opts": "-Xmx204m",
+          "mapreduce.task.io.sort.mb": "142"
         },
         "property_attributes": {
           'mapreduce.task.io.sort.mb': {'maximum': '2047'},
@@ -2249,9 +2259,9 @@ class TestHDP22StackAdvisor(TestCase):
             "properties": {
                 'mapreduce.job.queuename': 'default',
                 "mapreduce.map.memory.mb": "700",
-                "mapreduce.reduce.memory.mb": "1280",
+                "mapreduce.reduce.memory.mb": "700",
                 "yarn.app.mapreduce.am.command-opts": "-Xmx560m -Dhdp.version=${hdp.version}",
-                "mapreduce.reduce.java.opts": "-Xmx1024m",
+                "mapreduce.reduce.java.opts": "-Xmx560m",
                 "yarn.app.mapreduce.am.resource.mb": "700",
                 "mapreduce.map.java.opts": "-Xmx560m",
                 "mapreduce.task.io.sort.mb": "392"
@@ -3808,13 +3818,13 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
           "yarn.nodemanager.linux-container-executor.cgroups.mount": "true",
-          "yarn.nodemanager.resource.memory-mb": "39424",
-          "yarn.scheduler.minimum-allocation-mb": "3584",
+          "yarn.nodemanager.resource.memory-mb": "33792",
+          "yarn.scheduler.minimum-allocation-mb": "1024",
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
           "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "/yarn",
-          "yarn.scheduler.maximum-allocation-mb": "39424",
+          "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
@@ -3833,13 +3843,13 @@ class TestHDP22StackAdvisor(TestCase):
             "maximum": "49152"
           },
           "yarn.scheduler.minimum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "33792"
           },
           "yarn.nodemanager.resource.cpu-vcores": {
             "maximum": "12"
           },
           "yarn.scheduler.maximum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "33792"
           }
         }
       }
@@ -3867,13 +3877,13 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
           "yarn.nodemanager.linux-container-executor.cgroups.mount": "true",
-          "yarn.nodemanager.resource.memory-mb": "39424",
-          "yarn.scheduler.minimum-allocation-mb": "3584",
+          "yarn.nodemanager.resource.memory-mb": "33792",
+          "yarn.scheduler.minimum-allocation-mb": "1024",
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
           "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "/yarn",
-          "yarn.scheduler.maximum-allocation-mb": "39424",
+          "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
@@ -3901,13 +3911,13 @@ class TestHDP22StackAdvisor(TestCase):
             "maximum": "49152"
           },
           "yarn.scheduler.minimum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "33792"
           },
           "yarn.nodemanager.resource.cpu-vcores": {
             "maximum": "12"
           },
           "yarn.scheduler.maximum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "33792"
           },
           "yarn.nodemanager.linux-container-executor.resources-handler.class": {
             "delete": "true"

http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index c89eee4..295aee8 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -212,6 +212,9 @@ class TestHDP23StackAdvisor(TestCase):
       ]
     }
     services = {
+      "context" : {
+        "call_type" : "recommendConfigurations"
+      },
       "services" : [ {
         "StackServices":{
           "service_name": "YARN",

http://git-wip-us.apache.org/repos/asf/ambari/blob/86ed53e7/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index a7ccdfe..d4a301c 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -374,7 +374,8 @@ class TestHDP25StackAdvisor(TestCase):
     clusterData = {
       "cpu": 4,
       "containers": 5,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       "spark2-defaults": {
@@ -801,7 +802,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
-      }
+      },
+      "minContainerRam": 512
     }
 
 
@@ -969,7 +971,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
-      }
+      },
+      "minContainerRam": 512
     }
 
     configurations = {
@@ -1149,7 +1152,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
-      }
+      },
+      "minContainerRam": 512
     }
 
 
@@ -1348,7 +1352,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 2048
-      }
+      },
+      "minContainerRam": 512
     }
 
 
@@ -1542,7 +1547,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 1024,
       "referenceNodeManagerHost" : {
         "total_mem" : 51200 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
     configurations = {
@@ -1744,7 +1750,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 2048,
       "referenceNodeManagerHost" : {
         "total_mem" : 40960 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
     configurations = {
@@ -1940,7 +1947,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 341,
       "referenceNodeManagerHost" : {
         "total_mem" : 12288 * 1024
-      }
+      },
+      "minContainerRam": 341
     }
 
 
@@ -2138,7 +2146,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 2048,
       "referenceNodeManagerHost" : {
         "total_mem" : 204800 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
     configurations = {
@@ -2340,7 +2349,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 3072,
       "referenceNodeManagerHost" : {
         "total_mem" : 40960 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
 
@@ -2538,7 +2548,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 341,
       "referenceNodeManagerHost" : {
         "total_mem" : 204800 * 1024
-      }
+      },
+      "minContainerRam": 341
     }
 
 
@@ -2735,7 +2746,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 82240,
       "referenceNodeManagerHost" : {
         "total_mem" : 204800 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
     configurations = {
@@ -2927,7 +2939,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 82240,
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
 
@@ -3148,7 +3161,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
-      }
+      },
+      "minContainerRam": 512
     }
 
     configurations = {
@@ -3374,7 +3388,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
-      }
+      },
+      "minContainerRam": 512
     }
 
     configurations = {
@@ -3566,7 +3581,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 82240,
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
     configurations = {
@@ -3748,7 +3764,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 82240,
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
-      }
+      },
+      "minContainerRam": 1024
     }
 
     configurations = {
@@ -3880,7 +3897,8 @@ class TestHDP25StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
-      }
+      },
+      "minContainerRam": 512
     }
 
     configurations = {
@@ -3925,7 +3943,8 @@ class TestHDP25StackAdvisor(TestCase):
       "amMemory": 2000,
       "reduceMemory": 2056,
       "containers": 3,
-      "ramPerContainer": 256
+      "ramPerContainer": 256,
+      "minContainerRam": 256
     }
     expected = {
       'application-properties': {
@@ -4660,7 +4679,8 @@ class TestHDP25StackAdvisor(TestCase):
       "cpu": 4,
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
-      }
+      },
+      "minContainerRam": 256
     }
     hosts = {
       "items" : [


Mime
View raw message