ambari-commits mailing list archives

From swa...@apache.org
Subject ambari git commit: AMBARI-19827. HiveServer2 Interactive won't start in clusters with less memory.
Date Thu, 09 Feb 2017 00:06:12 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk c219d3577 -> de420a88a


AMBARI-19827. HiveServer2 Interactive won't start in clusters with less memory.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/de420a88
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/de420a88
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/de420a88

Branch: refs/heads/trunk
Commit: de420a88a7ab54a0a668054e6cf68521d27de0bf
Parents: c219d35
Author: Swapan Shridhar <sshridhar@hortonworks.com>
Authored: Wed Feb 8 13:41:14 2017 -0800
Committer: Swapan Shridhar <sshridhar@hortonworks.com>
Committed: Wed Feb 8 16:05:56 2017 -0800

----------------------------------------------------------------------
 .../stacks/HDP/2.5/services/stack_advisor.py    |  48 ++--
 .../stacks/2.5/common/test_stack_advisor.py     | 243 +++++++++++++++++--
 2 files changed, 250 insertions(+), 41 deletions(-)
----------------------------------------------------------------------
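
The sizing logic in the diff below leans on two rounding helpers from the stack advisor, _normalizeUp and _normalizeDown, which round a memory figure to a whole number of YARN containers. A minimal sketch of the assumed behavior (illustrative only; the actual implementations live elsewhere in stack_advisor.py):

import math

def normalize_up(value_mb, container_mb):
    # Round a memory request up to the next whole YARN container.
    return math.ceil(float(value_mb) / container_mb) * container_mb

def normalize_down(value_mb, container_mb):
    # Round available memory down to a whole number of YARN containers.
    return math.floor(float(value_mb) / container_mb) * container_mb

# Example, with yarn.scheduler.minimum-allocation-mb = 682:
#   normalize_up(500, 682)    -> 682.0
#   normalize_down(2046, 682) -> 2046.0  (exactly 3 containers)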


http://git-wip-us.apache.org/repos/asf/ambari/blob/de420a88/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index aa47493..99b6776 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -895,7 +895,6 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     yarn_min_container_size = float(self.get_yarn_min_container_size(services, configurations))
     tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_capacity))
     normalized_tez_am_container_size = self._normalizeUp(tez_am_container_size, yarn_min_container_size)
-    min_memory_required = min_memory_required + normalized_tez_am_container_size
 
     if yarn_site and "yarn.nodemanager.resource.cpu-vcores" in yarn_site:
       cpu_per_nm_host = float(yarn_site["yarn.nodemanager.resource.cpu-vcores"])
@@ -908,7 +907,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     # Calculate the available memory for LLAP app
     yarn_nm_mem_in_mb_normalized = self._normalizeDown(yarn_nm_mem_in_mb, yarn_min_container_size)
-    mem_per_thread_for_llap = self.calculate_mem_per_thread_for_llap(services, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host)
+    mem_per_thread_for_llap = float(self.calculate_mem_per_thread_for_llap(services, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host))
     Logger.info("DBG: Calculated mem_per_thread_for_llap : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, "
                   "cpu_per_nm_host : {2}".format(mem_per_thread_for_llap, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host))
 
@@ -917,8 +916,26 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
 
+    # Get calculated value for Slider AM container Size
+    slider_am_container_size = self._normalizeUp(self.calculate_slider_am_size(yarn_min_container_size),
+                                                 yarn_min_container_size)
+    Logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
+                "{1}".format(slider_am_container_size, yarn_min_container_size))
+
+    min_memory_required = normalized_tez_am_container_size + slider_am_container_size + self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
+    Logger.info("DBG: Calculated 'min_memory_required': {0} using following : slider_am_container_size: {1}, "
+                "normalized_tez_am_container_size : {2}, mem_per_thread_for_llap : {3}, yarn_min_container_size : "
+                "{4}".format(min_memory_required, slider_am_container_size, normalized_tez_am_container_size, mem_per_thread_for_llap, yarn_min_container_size))
+
+    min_nodes_required = int(math.ceil( min_memory_required / yarn_nm_mem_in_mb_normalized))
+    Logger.info("DBG: Calculated 'min_node_required': {0}, using following : min_memory_required : {1}, yarn_nm_mem_in_mb_normalized "
+                ": {2}".format(min_nodes_required, min_memory_required, yarn_nm_mem_in_mb_normalized))
+    if min_nodes_required > node_manager_cnt:
+      Logger.warn("ERROR: Not enough memory/nodes to run LLAP");
+      self.recommendDefaultLlapConfiguration(configurations, services, hosts)
+      return
+
     mem_per_thread_for_llap = float(mem_per_thread_for_llap)
-    min_memory_required = min_memory_required + self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
 
     Logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap))
     if not selected_queue_is_ambari_managed_llap:
@@ -943,7 +960,15 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       Logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, "
                     "total_llap_mem_normalized : {2}".format(hive_tez_am_cap_available, queue_am_fraction_perc, total_llap_mem_normalized))
     else:  # Ambari managed 'llap' named queue at root level.
-      num_llap_nodes_requested = self.get_num_llap_nodes(services, configurations) #Input
+      # Set 'num_llap_nodes_requested' for 1st invocation, as it gets passed as 1 otherwise, read from config.
+
+      # Check if its : 1. 1st invocation from UI ('enable_hive_interactive' in changed-configurations)
+      # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case)
+      if (changed_configs_has_enable_hive_int or  0 == len(services['changed-configurations'])) \
+        and services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']:
+        num_llap_nodes_requested = min_nodes_required
+      else:
+        num_llap_nodes_requested = self.get_num_llap_nodes(services, configurations) #Input
       total_llap_mem = num_llap_nodes_requested * yarn_nm_mem_in_mb_normalized
       Logger.info("DBG: Calculated 'total_llap_mem' : {0}, using following: num_llap_nodes_requested : {1}, "
                     "yarn_nm_mem_in_mb_normalized : {2}".format(total_llap_mem, num_llap_nodes_requested, yarn_nm_mem_in_mb_normalized))
@@ -967,24 +992,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     # Common calculations now, irrespective of the queue selected.
 
-    # Get calculated value for Slider AM container Size
-    slider_am_container_size = self._normalizeUp(self.calculate_slider_am_size(yarn_min_container_size),
-                                                 yarn_min_container_size)
-    Logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
-                  "{1}".format(slider_am_container_size, yarn_min_container_size))
-    min_memory_required = min_memory_required + slider_am_container_size
     llap_mem_for_tezAm_and_daemons = total_llap_mem_normalized - slider_am_container_size
     Logger.info("DBG: Calculated 'llap_mem_for_tezAm_and_daemons' : {0}, using following : total_llap_mem_normalized : {1}, "
                   "slider_am_container_size : {2}".format(llap_mem_for_tezAm_and_daemons, total_llap_mem_normalized, slider_am_container_size))
 
-    Logger.info("DBG: min_memory_required: {0}, yarn_nm_mem_in_mb_normalized: {1}".format(min_memory_required, yarn_nm_mem_in_mb_normalized))
-    min_nodes_required = int(ceil( min_memory_required / yarn_nm_mem_in_mb_normalized))
-    Logger.info("DBG: min_node_required: {0}".format(min_nodes_required))
-    if min_nodes_required > node_manager_cnt:
-      Logger.warn("ERROR: Not enough memory/nodes to run LLAP");
-      self.recommendDefaultLlapConfiguration(configurations, services, hosts)
-      return
-
     if llap_mem_for_tezAm_and_daemons < 2 * yarn_min_container_size:
       Logger.warning("Not enough capacity available on the cluster to run LLAP")
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
@@ -1301,7 +1312,6 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     hsi_site = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)
     if hsi_site and 'hive.tez.container.size' in hsi_site:
       hive_container_size = hsi_site['hive.tez.container.size']
-
     return hive_container_size
 
   def get_llap_headroom_space(self, services, configurations):
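
The net effect of the stack_advisor.py changes above is that the cluster-capacity check now runs before any per-queue LLAP sizing. A simplified, self-contained sketch of that gate (normalize_up stands in for the advisor's _normalizeUp; the real code also logs and falls back to recommendDefaultLlapConfiguration when the check fails):

import math

def normalize_up(value_mb, container_mb):
    # Round up to a whole YARN container (stand-in for _normalizeUp).
    return math.ceil(float(value_mb) / container_mb) * container_mb

def llap_fits_on_cluster(tez_am_mb, slider_am_mb, mem_per_llap_thread_mb,
                         yarn_min_container_mb, nm_mem_mb_normalized, node_manager_cnt):
    # Memory needed to host the Tez AM, the Slider AM and at least one LLAP
    # executor, each rounded up to whole containers.
    min_memory_required = (normalize_up(tez_am_mb, yarn_min_container_mb) +
                           normalize_up(slider_am_mb, yarn_min_container_mb) +
                           normalize_up(mem_per_llap_thread_mb, yarn_min_container_mb))
    # Fewest NodeManagers that can supply that much normalized memory.
    min_nodes_required = int(math.ceil(min_memory_required / float(nm_mem_mb_normalized)))
    return min_nodes_required <= node_manager_cnt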

http://git-wip-us.apache.org/repos/asf/ambari/blob/de420a88/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 9029dbb..ec56bad 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -1958,15 +1958,15 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'minimum': '1', 'maximum': '4'})
-    self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=0.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=0.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=100.0\nyarn.scheduler.capacity.root.llap.capacity=100.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
+    self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=66.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=66.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=34.0\nyarn.scheduler.capacity.root.llap.capacity=34.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
 
     self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '11594')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '10571')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '3')
 
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '5450')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '4427')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'true')
 
     self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '4915')
@@ -1981,8 +1981,206 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.queue.name'], {'entries': [{'value': 'default', 'label': 'default'}, {'value': 'llap', 'label': 'llap'}]})
 
 
-
  # Test 8: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+  #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
+  #          (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'enable_hive_interactive'
+  #
+  #         Small configuration test with 3 nodes - 'yarn.nodemanager.resource.memory-mb' : 2046 and 'yarn.scheduler.minimum-allocation-mb' : 682, representing a small GCE cluster.
+  #
+  #         Expected : Configurations values recommended for llap related configs.
+  def test_recommendYARNConfigurations_three_node_manager_llap_configs_updated_2(self):
+    # 3 node managers and yarn.nodemanager.resource.memory-mb": "2046"
+    services = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6401.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'false',
+          u'type': u'hive-interactive-env',
+          u'name': u'enable_hive_interactive'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'num_llap_nodes':'3',
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size':'682'
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "682",
+            "yarn.nodemanager.resource.memory-mb": "2046",
+            "yarn.nodemanager.resource.cpu-vcores": '3'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "682"
+          }
+        },
+        "hive-site":
+          {
+            'properties': {
+              'hive.tez.container.size': '1024'
+            }
+          },
+      }
+    }
+
+
+    clusterData = {
+      "cpu": 4,
+      "mapMemory": 30000,
+      "amMemory": 20000,
+      "reduceMemory": 20560,
+      "containers": 30,
+      "ramPerContainer": 341,
+      "referenceNodeManagerHost" : {
+        "total_mem" : 12288 * 1024
+      },
+      "yarnMinContainerSize": 341
+    }
+
+
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'minimum': '1', 'maximum': '3.0'})
+    self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=66.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=66.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=34.0\nyarn.scheduler.capacity.root.llap.capacity=34.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
+
+    self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '682')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '1')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '1')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '0')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'false')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '545')
+    self.assertEqual(configurations['hive-interactive-env']['properties']['hive_heapsize'], '2048')
+    self.assertEqual(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], {'maximum': '3', 'minimum': '1', 'read_only': 'false'})
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '682')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'], 'llap')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.auto.convert.join.noconditionaltask.size'], '189792256')
+
+    self.assertEqual(configurations['tez-interactive-site']['properties']['tez.am.resource.memory.mb'], '682')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.queue.name'], {'entries': [{'value': 'default', 'label': 'default'}, {'value': 'llap', 'label': 'llap'}]})
+
+
+
+  # Test 9: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
  #         'capacity-scheduler' configs are passed-in as single "/n" separated string  and
  #          (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'hive.server2.tez.sessions.per.default.queue'
   #         Expected : Configurations values recommended for llap related configs.
@@ -2185,7 +2383,7 @@ class TestHDP25StackAdvisor(TestCase):
  ####################### 'Five Node Managers' cluster - tests for calculating llap configs ################
 
 
-  # Test 9: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+  # Test 10: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
  #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
  #          (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'num_llap_nodes'
   #         Expected : Configurations values recommended for llap related configs.
@@ -2381,7 +2579,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 10: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+  # Test 11: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
  #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
  #          (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'enable_hive_interactive'
   #         Expected : Configurations values recommended for llap related configs.
@@ -2552,17 +2750,17 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts)
 
-    self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=0.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=0.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=100.0\nyarn.scheduler.capacity.root.llap.capacity=100.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '2.0')
-    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '5.0', 'minimum': '1'})
+    self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=80.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=80.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=20.0\nyarn.scheduler.capacity.root.llap.capacity=20.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '4', 'minimum': '1'})
 
     self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '204259')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '203918')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '10')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '10')
 
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '183779')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '183438')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'true')
 
     self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '16384')
@@ -2578,7 +2776,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 11: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+  # Test 12: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
  #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
  #          (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'hive.server2.tez.sessions.per.default.queue'
   #         Expected : Configurations values recommended for llap related configs.
@@ -2777,7 +2975,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 12: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+  # Test 13 (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
  #          'capacity-scheduler' configs are passed-in as dictionary and
  #          services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"] is set to value "null"  and
  #          (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'hive.server2.tez.sessions.per.default.queue'
@@ -2963,7 +3161,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 13: (1). Multiple queue exist at various depths in capacity-scheduler, and 'capacity-scheduler' configs are
+  # Test 14: (1). Multiple queue exist at various depths in capacity-scheduler, and 'capacity-scheduler' configs are
   #               passed-in as dictionary and services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]
   #               is set to value "null"  and
   #          (2). Selected queue in 'hive.llap.daemon.queue.name' is 'default.b'
@@ -3184,7 +3382,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 14: (1). Multiple queue exist at various depths in capacity-scheduler, and 'capacity-scheduler' configs are
+  # Test 15: (1). Multiple queue exist at various depths in capacity-scheduler, and 'capacity-scheduler' configs are
   #               passed-in as dictionary and services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]
   #               is set to value "null"  and
  #          (2). Selected queue in 'hive.llap.daemon.queue.name' is 'default.b' and is in STOPPED state
@@ -3404,7 +3602,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 15: (1). only 'default' queue exists at root level in capacity-scheduler, and
+  # Test 16: (1). only 'default' queue exists at root level in capacity-scheduler, and
  #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
  #         Expected : 'hive.llap.daemon.queue.name' property attributes getting set with current YARN leaf queues.
  #                    'hive.server2.tez.default.queues' value getting set to value of 'hive.llap.daemon.queue.name' (llap).
@@ -3521,7 +3719,8 @@ class TestHDP25StackAdvisor(TestCase):
               'hive.server2.tez.sessions.per.default.queue': '1',
               'hive.llap.daemon.num.executors' : '1',
               'hive.llap.daemon.yarn.container.mb' : '10240',
-              'hive.llap.io.memory.size' : '512'
+              'hive.llap.io.memory.size' : '512',
+              'hive.tez.container.size' : '1024'
             }
           },
         "hive-env":
@@ -3581,16 +3780,16 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'], 'llap')
     self.assertEquals(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'],
                       {'maximum': '1', 'minimum': '1', 'read_only': 'false'})
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '0')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '3')
 
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '0')
-    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '0')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '197632')
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '2457')
 
 
 
 
 
-  # Test 16: (1). only 'default' queue exists at root level in capacity-scheduler, and
+  # Test 17: (1). only 'default' queue exists at root level in capacity-scheduler, and
  #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
   #         change in 'hive.llap.daemon.queue.name' value detected.
  #         Expected : 'hive.llap.daemon.queue.name' property attributes getting set with current YARN leaf queues.
@@ -3784,7 +3983,7 @@ class TestHDP25StackAdvisor(TestCase):
 
 
 
-  # Test 17: capacity-scheduler malformed as input in services.
+  # Test 18: capacity-scheduler malformed as input in services.
   #         Expected : No changes.
   def test_recommendYARNConfigurations_no_update_to_llap_queue_7(self):
     services= {


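For intuition, the small GCE-cluster test added above (test_recommendYARNConfigurations_three_node_manager_llap_configs_updated_2: 3 NodeManagers, yarn.nodemanager.resource.memory-mb = 2046, yarn.scheduler.minimum-allocation-mb = 682, tez.am.resource.memory.mb = 682) works out roughly as follows, assuming the Tez AM, the Slider AM and a single LLAP executor each normalize up to one 682 MB container (consistent with the expected hive.llap.daemon.yarn.container.mb of '682'):

import math

yarn_min_container = 682.0   # yarn.scheduler.minimum-allocation-mb
nm_mem = 2046.0              # yarn.nodemanager.resource.memory-mb per NodeManager
node_managers = 3

nm_mem_normalized = math.floor(nm_mem / yarn_min_container) * yarn_min_container  # 2046.0 (3 containers)
min_memory_required = 682.0 + 682.0 + 682.0    # Tez AM + Slider AM + one LLAP executor = 2046.0
min_nodes_required = int(math.ceil(min_memory_required / nm_mem_normalized))      # 1
assert min_nodes_required <= node_managers     # 1 <= 3: the gate passes, LLAP is sized to one 682 MB daemon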