ambari-commits mailing list archives

From: dmitriu...@apache.org
Subject: [2/4] ambari git commit: AMBARI-20617. Display log level, method name from stack_advisor in ambari-server.log (dlysnichenko)
Date: Thu, 30 Mar 2017 12:49:09 GMT
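
This part of the patch swaps the static Logger imported from resource_management for a per-instance logger created via initialize_logger(), so each record in ambari-server.log can carry the advisor name, log level, and calling method. The helper itself lands elsewhere in AMBARI-20617; a minimal sketch of what such a method might look like (the name, defaults, and format string here are assumptions, not the committed code):

    import logging
    import sys

    class DefaultStackAdvisor(object):
        def initialize_logger(self, name='stack_advisor', level=logging.INFO):
            # A named instance logger; %(funcName)s is what surfaces the
            # calling method in the log output, %(levelname)s the log level.
            self.logger = logging.getLogger(name)
            handler = logging.StreamHandler(sys.stderr)
            handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s %(name)s.%(funcName)s: %(message)s'))
            self.logger.addHandler(handler)
            self.logger.setLevel(level)

Subclasses such as HDP25StackAdvisor then call self.initialize_logger("HDP25StackAdvisor") once in __init__ and use self.logger everywhere below.
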
http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index b693f9f..2a1113a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -20,7 +20,6 @@ limitations under the License.
 import math
 
 from ambari_commons.str_utils import string_set_equals
-from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
 
@@ -29,7 +28,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
   def __init__(self):
     super(HDP25StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("HDP25StackAdvisor")
     self.HIVE_INTERACTIVE_SITE = 'hive-interactive-site'
     self.YARN_ROOT_DEFAULT_QUEUE_NAME = 'default'
     self.AMBARI_MANAGED_LLAP_QUEUE_NAME = 'llap'
@@ -42,11 +41,11 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     putOozieEnvProperty = self.putProperty(configurations, "oozie-env", services)
 
     if not "oozie-env" in services["configurations"] :
-      Logger.info("No oozie configurations available")
+      self.logger.info("No oozie configurations available")
       return
 
     if not "FALCON_SERVER" in clusterData["components"] :
-      Logger.info("Falcon is not part of the installation")
+      self.logger.info("Falcon is not part of the installation")
       return
 
     falconUser = 'falcon'
@@ -54,35 +53,35 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     if "falcon-env" in services["configurations"] :
       if "falcon_user" in services["configurations"]["falcon-env"]["properties"] :
         falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
-        Logger.info("Falcon user from configuration: %s " % falconUser)
+        self.logger.info("Falcon user from configuration: %s " % falconUser)
 
-    Logger.info("Falcon user : %s" % falconUser)
+    self.logger.info("Falcon user : %s" % falconUser)
 
     oozieUser = 'oozie'
 
     if "oozie_user" \
       in services["configurations"]["oozie-env"]["properties"] :
       oozieUser = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
-      Logger.info("Oozie user from configuration %s" % oozieUser)
+      self.logger.info("Oozie user from configuration %s" % oozieUser)
 
-    Logger.info("Oozie user %s" % oozieUser)
+    self.logger.info("Oozie user %s" % oozieUser)
 
     if "oozie_admin_users" \
             in services["configurations"]["oozie-env"]["properties"] :
       currentAdminUsers =  services["configurations"]["oozie-env"]["properties"]["oozie_admin_users"]
-      Logger.info("Oozie admin users from configuration %s" % currentAdminUsers)
+      self.logger.info("Oozie admin users from configuration %s" % currentAdminUsers)
     else :
       currentAdminUsers = "{0}, oozie-admin".format(oozieUser)
-      Logger.info("Setting default oozie admin users to %s" % currentAdminUsers)
+      self.logger.info("Setting default oozie admin users to %s" % currentAdminUsers)
 
 
     if falconUser in currentAdminUsers :
-      Logger.info("Falcon user %s already member of  oozie admin users " % falconUser)
+      self.logger.info("Falcon user %s already member of  oozie admin users " % falconUser)
       return
 
     newAdminUsers = "{0},{1}".format(currentAdminUsers, falconUser)
 
-    Logger.info("new oozie admin users : %s" % newAdminUsers)
+    self.logger.info("new oozie admin users : %s" % newAdminUsers)
 
     services["forced-configurations"].append({"type" : "oozie-env", "name" : "oozie_admin_users"})
     putOozieEnvProperty("oozie_admin_users", newAdminUsers)
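
For illustration, with hypothetical values the admin-user merge in this hunk reduces to:

    oozieUser, falconUser = 'oozie', 'falcon'
    currentAdminUsers = "{0}, oozie-admin".format(oozieUser)  # the default branch above
    if falconUser not in currentAdminUsers:
        newAdminUsers = "{0},{1}".format(currentAdminUsers, falconUser)
    # newAdminUsers == 'oozie, oozie-admin,falcon'
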
@@ -158,7 +157,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     auth_type = application_properties['atlas.authentication.method.ldap.type']
     auth_ldap_enable = application_properties['atlas.authentication.method.ldap'].lower() == 'true'
-    Logger.info("Validating Atlas configs, authentication type: %s" % str(auth_type))
+    self.logger.info("Validating Atlas configs, authentication type: %s" % str(auth_type))
 
     # Required props
     ldap_props = {"atlas.authentication.method.ldap.url": "",
@@ -308,7 +307,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
 
     if not capacity_scheduler_properties:
-      Logger.warning("Couldn't retrieve 'capacity-scheduler' properties while doing validation checks for Hive Server Interactive.")
+      self.logger.warning("Couldn't retrieve 'capacity-scheduler' properties while doing validation checks for Hive Server Interactive.")
       return []
 
     if hsi_site:
@@ -327,7 +326,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
                       "app to run".format(llap_queue_name, llap_queue_cap_perc, min_reqd_queue_cap_perc)
             validationItems.append({"config-name": "hive.llap.daemon.queue.name", "item": self.getErrorItem(errMsg1)})
         else:
-          Logger.error("Couldn't retrieve '{0}' queue's capacity from 'capacity-scheduler' while doing validation checks for "
+          self.logger.error("Couldn't retrieve '{0}' queue's capacity from 'capacity-scheduler' while doing validation checks for "
            "Hive Server Interactive.".format(llap_queue_name))
 
         # Validate that current selected queue in 'hive.llap.daemon.queue.name' state is not STOPPED.
@@ -338,10 +337,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
               .format(llap_queue_name, llap_selected_queue_state)
             validationItems.append({"config-name": "hive.llap.daemon.queue.name","item": self.getErrorItem(errMsg2)})
         else:
-          Logger.error("Couldn't retrieve '{0}' queue's state from 'capacity-scheduler' while doing validation checks for "
+          self.logger.error("Couldn't retrieve '{0}' queue's state from 'capacity-scheduler' while doing validation checks for "
                        "Hive Server Interactive.".format(llap_queue_name))
       else:
-        Logger.error("Couldn't retrieve 'hive.llap.daemon.queue.name' config from 'hive-interactive-site' while doing "
+        self.logger.error("Couldn't retrieve 'hive.llap.daemon.queue.name' config from 'hive-interactive-site' while doing "
                      "validation checks for Hive Server Interactive.")
 
       # Validate that 'hive.server2.enable.doAs' config is not set to 'true' for Hive2.
@@ -555,7 +554,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
       atlas_rest_address_list = ["{0}://{1}:{2}".format(scheme, hostname, metadata_port) for hostname in atlas_host_names]
       atlas_rest_address = ",".join(atlas_rest_address_list)
-      Logger.info("Constructing atlas.rest.address=%s" % atlas_rest_address)
+      self.logger.info("Constructing atlas.rest.address=%s" % atlas_rest_address)
     return atlas_rest_address
 
   def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
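
A quick sketch of the address construction above, with made-up hosts and port:

    scheme, metadata_port = 'http', '21000'  # hypothetical values
    atlas_host_names = ['atlas1.example.com', 'atlas2.example.com']
    atlas_rest_address_list = ["{0}://{1}:{2}".format(scheme, hostname, metadata_port)
                               for hostname in atlas_host_names]
    atlas_rest_address = ",".join(atlas_rest_address_list)
    # 'http://atlas1.example.com:21000,http://atlas2.example.com:21000'
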
@@ -699,15 +698,15 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       putHbaseSiteProperty('hbase.master.ui.readonly', 'true')
 
       phoenix_query_server_hosts = self.get_phoenix_query_server_hosts(services, hosts)
-      Logger.debug("Calculated Phoenix Query Server hosts: %s" % str(phoenix_query_server_hosts))
+      self.logger.debug("Calculated Phoenix Query Server hosts: %s" % str(phoenix_query_server_hosts))
       if phoenix_query_server_hosts:
-        Logger.debug("Attempting to update hadoop.proxyuser.HTTP.hosts with %s" % str(phoenix_query_server_hosts))
+        self.logger.debug("Attempting to update hadoop.proxyuser.HTTP.hosts with %s" % str(phoenix_query_server_hosts))
         # The PQS hosts we want to ensure are set
         new_value = ','.join(phoenix_query_server_hosts)
         # Update the proxyuser setting, deferring to our callback to merge results together
         self.put_proxyuser_value("HTTP", new_value, services=services, configurations=configurations, put_function=putCoreSiteProperty)
       else:
-        Logger.debug("No phoenix query server hosts to update")
+        self.logger.debug("No phoenix query server hosts to update")
     else:
       putHbaseSiteProperty('hbase.master.ui.readonly', 'false')
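
The proxyuser update above boils down to a comma join of the calculated PQS hosts (hostnames here are hypothetical):

    phoenix_query_server_hosts = ['pqs1.example.com', 'pqs2.example.com']
    new_value = ','.join(phoenix_query_server_hosts)
    # 'pqs1.example.com,pqs2.example.com' is then merged into
    # hadoop.proxyuser.HTTP.hosts via put_proxyuser_value().
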
 
@@ -722,7 +721,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       return [host['Hosts']['host_name'] for host in phoenix_query_server_hosts]
 
   def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
-    Logger.info("DBG: Invoked recommendHiveConfiguration")
+    self.logger.info("DBG: Invoked recommendHiveConfiguration")
     super(HDP25StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
     putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
     putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
@@ -747,9 +746,9 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
           if hive_tez_default_queue:
             putHiveInteractiveSiteProperty("hive.server2.tez.default.queues", hive_tez_default_queue)
-            Logger.debug("Updated 'hive.server2.tez.default.queues' config : '{0}'".format(hive_tez_default_queue))
+            self.logger.debug("Updated 'hive.server2.tez.default.queues' config : '{0}'".format(hive_tez_default_queue))
     else:
-      Logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'True'.")
+      self.logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'True'.")
       putHiveInteractiveEnvProperty('enable_hive_interactive', 'false')
       putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "true")
 
@@ -810,14 +809,14 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
       Note: All memory calculations are in MB, unless specified otherwise.
     """
-    Logger.info("DBG: Entered updateLlapConfigs");
+    self.logger.info("DBG: Entered updateLlapConfigs");
 
     # Determine if we entered here during cluster creation.
     operation = getUserOperationContext(services, "operation")
     is_cluster_create_opr = False
     if operation == self.CLUSTER_CREATE_OPERATION:
       is_cluster_create_opr = True
-    Logger.info("Is cluster create operation ? = {0}".format(is_cluster_create_opr))
+    self.logger.info("Is cluster create operation ? = {0}".format(is_cluster_create_opr))
 
     putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
     putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
@@ -852,9 +851,9 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     if capacity_scheduler_properties:
       # Get all leaf queues.
       leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
-      Logger.info("YARN leaf Queues = {0}".format(leafQueueNames))
+      self.logger.info("YARN leaf Queues = {0}".format(leafQueueNames))
       if len(leafQueueNames) == 0:
-        Logger.error("Queue(s) couldn't be retrieved from capacity-scheduler.")
+        self.logger.error("Queue(s) couldn't be retrieved from capacity-scheduler.")
         return
 
       # Check if it's 1st invocation after enabling Hive Server Interactive (config: enable_hive_interactive).
@@ -873,20 +872,20 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
           putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', first_leaf_queue)
           putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', first_leaf_queue)
           llap_named_queue_selected_in_curr_invocation = False
-      Logger.info("DBG: llap_named_queue_selected_in_curr_invocation = {0}".format(llap_named_queue_selected_in_curr_invocation))
+      self.logger.info("DBG: llap_named_queue_selected_in_curr_invocation = {0}".format(llap_named_queue_selected_in_curr_invocation))
 
       if (len(leafQueueNames) == 2 and (llap_daemon_selected_queue_name and llap_daemon_selected_queue_name == llap_queue_name) or
         llap_named_queue_selected_in_curr_invocation) or \
         (len(leafQueueNames) == 1 and llap_daemon_selected_queue_name == 'default' and llap_named_queue_selected_in_curr_invocation):
-          Logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'False'.")
+          self.logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'False'.")
           putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "false")
           selected_queue_is_ambari_managed_llap = True
-          Logger.info("DBG: Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
+          self.logger.info("DBG: Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
                         "slider visibility to 'True'".format(llap_queue_name, list(leafQueueNames)))
       else:
-        Logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'True'.")
+        self.logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'True'.")
         putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "true")
-        Logger.info("Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
+        self.logger.info("Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
                      "visibility to 'False'.".format(llap_daemon_selected_queue_name, list(leafQueueNames)))
         selected_queue_is_ambari_managed_llap = False
 
@@ -896,17 +895,17 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         if llap_daemon_selected_queue_name:
           llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
           if llap_selected_queue_state is None or llap_selected_queue_state == "STOPPED":
-            Logger.error("Selected LLAP app queue '{0}' current state is : '{1}'. Setting LLAP configs to default "
+            self.logger.error("Selected LLAP app queue '{0}' current state is : '{1}'. Setting LLAP configs to default "
                        "values.".format(llap_daemon_selected_queue_name, llap_selected_queue_state))
             self.recommendDefaultLlapConfiguration(configurations, services, hosts)
             return
         else:
-          Logger.error("Retrieved LLAP app queue name is : '{0}'. Setting LLAP configs to default values."
+          self.logger.error("Retrieved LLAP app queue name is : '{0}'. Setting LLAP configs to default values."
                      .format(llap_daemon_selected_queue_name))
           self.recommendDefaultLlapConfiguration(configurations, services, hosts)
           return
     else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
+      self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
                    " Not calculating LLAP configs.")
       return
 
@@ -928,16 +927,16 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     if not changed_configs_in_hive_int_env and not llap_concurrency_in_changed_configs and \
       not llap_daemon_queue_in_changed_configs and services["changed-configurations"]:
-      Logger.info("DBG: LLAP parameters not modified. Not adjusting LLAP configs.")
-      Logger.info("DBG: Current 'changed-configuration' received is : {0}".format(services["changed-configurations"]))
+      self.logger.info("DBG: LLAP parameters not modified. Not adjusting LLAP configs.")
+      self.logger.info("DBG: Current 'changed-configuration' received is : {0}".format(services["changed-configurations"]))
       return
 
-    Logger.info("\nDBG: Performing LLAP config calculations ......")
+    self.logger.info("\nDBG: Performing LLAP config calculations ......")
     node_manager_host_list = self.getHostsForComponent(services, "YARN", "NODEMANAGER")
     node_manager_cnt = len(node_manager_host_list)
     yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
     total_cluster_capacity = node_manager_cnt * yarn_nm_mem_in_mb
-    Logger.info("DBG: Calculated total_cluster_capacity : {0}, using following : node_manager_cnt : {1}, "
+    self.logger.info("DBG: Calculated total_cluster_capacity : {0}, using following : node_manager_cnt : {1}, "
                 "yarn_nm_mem_in_mb : {2}".format(total_cluster_capacity, node_manager_cnt, yarn_nm_mem_in_mb))
     yarn_min_container_size = float(self.get_yarn_min_container_size(services, configurations))
     tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_capacity), is_cluster_create_opr,
@@ -949,7 +948,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     else:
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG Calculated normalized_tez_am_container_size : {0}, using following : tez_am_container_size : {1}, "
+    self.logger.info("DBG Calculated normalized_tez_am_container_size : {0}, using following : tez_am_container_size : {1}, "
                 "total_cluster_capacity : {2}".format(normalized_tez_am_container_size, tez_am_container_size,
                                                       total_cluster_capacity))
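
The capacity math above, with assumed semantics for the _normalizeUp/_normalizeDown helpers (rounding to multiples of the YARN minimum container size) and a hypothetical cluster:

    import math

    def normalize_up(val, container_size):
        # Assumed behaviour of self._normalizeUp
        return math.ceil(val / float(container_size)) * container_size

    def normalize_down(val, container_size):
        # Assumed behaviour of self._normalizeDown
        return math.floor(val / float(container_size)) * container_size

    # 10 NodeManagers with 16 GB each, 1 GB minimum container size.
    node_manager_cnt, yarn_nm_mem_in_mb, yarn_min_container_size = 10, 16384, 1024.0
    total_cluster_capacity = node_manager_cnt * yarn_nm_mem_in_mb           # 163840 MB
    yarn_nm_mem_in_mb_normalized = normalize_down(yarn_nm_mem_in_mb,
                                                  yarn_min_container_size)  # 16384.0
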
 
@@ -957,7 +956,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     yarn_nm_mem_in_mb_normalized = self._normalizeDown(yarn_nm_mem_in_mb, yarn_min_container_size)
     mem_per_thread_for_llap = float(self.calculate_mem_per_thread_for_llap(services, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host,
                                                                            is_cluster_create_opr, changed_configs_has_enable_hive_int))
-    Logger.info("DBG: Calculated mem_per_thread_for_llap : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, "
+    self.logger.info("DBG: Calculated mem_per_thread_for_llap : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, "
                   "cpu_per_nm_host : {2}".format(mem_per_thread_for_llap, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host))
 
 
@@ -968,48 +967,48 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     # Get calculated value for Slider AM container Size
     slider_am_container_size = self._normalizeUp(self.calculate_slider_am_size(yarn_min_container_size),
                                                  yarn_min_container_size)
-    Logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
+    self.logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
                 "{1}".format(slider_am_container_size, yarn_min_container_size))
 
     min_memory_required = normalized_tez_am_container_size + slider_am_container_size + self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
-    Logger.info("DBG: Calculated 'min_memory_required': {0} using following : slider_am_container_size: {1}, "
+    self.logger.info("DBG: Calculated 'min_memory_required': {0} using following : slider_am_container_size: {1}, "
                 "normalized_tez_am_container_size : {2}, mem_per_thread_for_llap : {3}, yarn_min_container_size : "
                 "{4}".format(min_memory_required, slider_am_container_size, normalized_tez_am_container_size, mem_per_thread_for_llap, yarn_min_container_size))
 
     min_nodes_required = int(math.ceil( min_memory_required / yarn_nm_mem_in_mb_normalized))
-    Logger.info("DBG: Calculated 'min_node_required': {0}, using following : min_memory_required : {1}, yarn_nm_mem_in_mb_normalized "
+    self.logger.info("DBG: Calculated 'min_node_required': {0}, using following : min_memory_required : {1}, yarn_nm_mem_in_mb_normalized "
                 ": {2}".format(min_nodes_required, min_memory_required, yarn_nm_mem_in_mb_normalized))
     if min_nodes_required > node_manager_cnt:
-      Logger.warning("ERROR: Not enough memory/nodes to run LLAP");
+      self.logger.warning("ERROR: Not enough memory/nodes to run LLAP");
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
 
     mem_per_thread_for_llap = float(mem_per_thread_for_llap)
 
-    Logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap))
+    self.logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap))
     if not selected_queue_is_ambari_managed_llap:
       llap_daemon_selected_queue_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties, llap_daemon_selected_queue_name, total_cluster_capacity)
 
       if llap_daemon_selected_queue_cap <= 0:
-        Logger.warning("'{0}' queue capacity percentage retrieved = {1}. Expected > 0.".format(
+        self.logger.warning("'{0}' queue capacity percentage retrieved = {1}. Expected > 0.".format(
           llap_daemon_selected_queue_name, llap_daemon_selected_queue_cap))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
       total_llap_mem_normalized = self._normalizeDown(llap_daemon_selected_queue_cap, yarn_min_container_size)
-      Logger.info("DBG: Calculated '{0}' queue available capacity : {1}, using following: llap_daemon_selected_queue_cap : {2}, "
+      self.logger.info("DBG: Calculated '{0}' queue available capacity : {1}, using following: llap_daemon_selected_queue_cap : {2}, "
                     "yarn_min_container_size : {3}".format(llap_daemon_selected_queue_name, total_llap_mem_normalized,
                                                            llap_daemon_selected_queue_cap, yarn_min_container_size))
       '''Rounding up numNodes so that we run more daemons, and utilize more CPUs. The rest of the calculations will take care of cutting this down if required'''
       num_llap_nodes_requested = math.ceil(total_llap_mem_normalized / yarn_nm_mem_in_mb_normalized)
-      Logger.info("DBG: Calculated 'num_llap_nodes_requested' : {0}, using following: total_llap_mem_normalized : {1}, "
+      self.logger.info("DBG: Calculated 'num_llap_nodes_requested' : {0}, using following: total_llap_mem_normalized : {1}, "
                     "yarn_nm_mem_in_mb_normalized : {2}".format(num_llap_nodes_requested, total_llap_mem_normalized, yarn_nm_mem_in_mb_normalized))
       # Populate the 'num_llap_nodes_requested' in config 'num_llap_nodes', a read-only config for the non-Ambari managed queue case.
       putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes_requested)
-      Logger.info("Setting config 'num_llap_nodes' as : {0}".format(num_llap_nodes_requested))
+      self.logger.info("Setting config 'num_llap_nodes' as : {0}".format(num_llap_nodes_requested))
       queue_am_fraction_perc = float(self.__getQueueAmFractionFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name))
       hive_tez_am_cap_available = queue_am_fraction_perc * total_llap_mem_normalized
-      Logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, "
+      self.logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, "
                     "total_llap_mem_normalized : {2}".format(hive_tez_am_cap_available, queue_am_fraction_perc, total_llap_mem_normalized))
     else:  # Ambari managed 'llap' named queue at root level.
       # Set 'num_llap_nodes_requested' for 1st invocation, as it gets passed as 1 otherwise, read from config.
@@ -1022,34 +1021,34 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       else:
         num_llap_nodes_requested = self.get_num_llap_nodes(services, configurations) #Input
       total_llap_mem = num_llap_nodes_requested * yarn_nm_mem_in_mb_normalized
-      Logger.info("DBG: Calculated 'total_llap_mem' : {0}, using following: num_llap_nodes_requested : {1}, "
+      self.logger.info("DBG: Calculated 'total_llap_mem' : {0}, using following: num_llap_nodes_requested : {1}, "
                     "yarn_nm_mem_in_mb_normalized : {2}".format(total_llap_mem, num_llap_nodes_requested, yarn_nm_mem_in_mb_normalized))
       total_llap_mem_normalized = float(self._normalizeDown(total_llap_mem, yarn_min_container_size))
-      Logger.info("DBG: Calculated 'total_llap_mem_normalized' : {0}, using following: total_llap_mem : {1}, "
+      self.logger.info("DBG: Calculated 'total_llap_mem_normalized' : {0}, using following: total_llap_mem : {1}, "
                     "yarn_min_container_size : {2}".format(total_llap_mem_normalized, total_llap_mem, yarn_min_container_size))
 
       # What percent is 'total_llap_mem' of 'total_cluster_capacity' ?
       llap_named_queue_cap_fraction = math.ceil(total_llap_mem_normalized / total_cluster_capacity * 100)
-      Logger.info("DBG: Calculated '{0}' queue capacity percent = {1}.".format(llap_queue_name, llap_named_queue_cap_fraction))
+      self.logger.info("DBG: Calculated '{0}' queue capacity percent = {1}.".format(llap_queue_name, llap_named_queue_cap_fraction))
 
       if llap_named_queue_cap_fraction > 100:
-        Logger.warning("Calculated '{0}' queue size = {1}. Cannot be > 100.".format(llap_queue_name, llap_named_queue_cap_fraction))
+        self.logger.warning("Calculated '{0}' queue size = {1}. Cannot be > 100.".format(llap_queue_name, llap_named_queue_cap_fraction))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
       # Adjust capacity scheduler for the 'llap' named queue.
       self.checkAndManageLlapQueue(services, configurations, hosts, llap_queue_name, llap_named_queue_cap_fraction)
       hive_tez_am_cap_available = total_llap_mem_normalized
-      Logger.info("DBG: hive_tez_am_cap_available : {0}".format(hive_tez_am_cap_available))
+      self.logger.info("DBG: hive_tez_am_cap_available : {0}".format(hive_tez_am_cap_available))
 
     # Common calculations now, irrespective of the queue selected.
 
     llap_mem_for_tezAm_and_daemons = total_llap_mem_normalized - slider_am_container_size
-    Logger.info("DBG: Calculated 'llap_mem_for_tezAm_and_daemons' : {0}, using following : total_llap_mem_normalized : {1}, "
+    self.logger.info("DBG: Calculated 'llap_mem_for_tezAm_and_daemons' : {0}, using following : total_llap_mem_normalized : {1}, "
                   "slider_am_container_size : {2}".format(llap_mem_for_tezAm_and_daemons, total_llap_mem_normalized, slider_am_container_size))
 
     if llap_mem_for_tezAm_and_daemons < 2 * yarn_min_container_size:
-      Logger.warning("Not enough capacity available on the cluster to run LLAP")
+      self.logger.warning("Not enough capacity available on the cluster to run LLAP")
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
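
With hypothetical numbers, the queue-percentage check above works out as:

    import math

    total_llap_mem_normalized, total_cluster_capacity = 40960.0, 163840
    llap_named_queue_cap_fraction = math.ceil(
        total_llap_mem_normalized / total_cluster_capacity * 100)  # 25.0, i.e. <= 100
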
 
@@ -1059,11 +1058,11 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     # Read 'hive.server2.tez.sessions.per.default.queue' prop if it's in changed-configs, else calculate it.
     if not llap_concurrency_in_changed_configs:
       if max_executors_per_node <= 0:
-        Logger.warning("Calculated 'max_executors_per_node' = {0}. Expected value >= 1.".format(max_executors_per_node))
+        self.logger.warning("Calculated 'max_executors_per_node' = {0}. Expected value >= 1.".format(max_executors_per_node))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
-      Logger.info("DBG: Calculated 'max_executors_per_node' : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
+      self.logger.info("DBG: Calculated 'max_executors_per_node' : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
                     "mem_per_thread_for_llap: {3}".format(max_executors_per_node, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap))
 
       # Default 1 AM for every 20 executor threads.
@@ -1071,122 +1070,122 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       # making use of total memory. However, it's possible that total memory will not be used - and the numExecutors is
       # instead limited by #CPUs. Use maxPerNode to factor this in.
       llap_concurreny_limit = min(math.floor(max_executors_per_node * num_llap_nodes_requested / DEFAULT_EXECUTOR_TO_AM_RATIO), MAX_CONCURRENT_QUERIES)
-      Logger.info("DBG: Calculated 'llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested : {2}, DEFAULT_EXECUTOR_TO_AM_RATIO "
+      self.logger.info("DBG: Calculated 'llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested : {2}, DEFAULT_EXECUTOR_TO_AM_RATIO "
                     ": {3}, MAX_CONCURRENT_QUERIES : {4}".format(llap_concurreny_limit, max_executors_per_node, num_llap_nodes_requested, DEFAULT_EXECUTOR_TO_AM_RATIO, MAX_CONCURRENT_QUERIES))
       llap_concurrency = min(llap_concurreny_limit, math.floor(llap_mem_for_tezAm_and_daemons / (DEFAULT_EXECUTOR_TO_AM_RATIO * mem_per_thread_for_llap + normalized_tez_am_container_size)))
-      Logger.info("DBG: Calculated 'llap_concurrency' : {0}, using following : llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
+      self.logger.info("DBG: Calculated 'llap_concurrency' : {0}, using following : llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
                     "{2}, DEFAULT_EXECUTOR_TO_AM_RATIO : {3}, mem_per_thread_for_llap : {4}, normalized_tez_am_container_size : "
                     "{5}".format(llap_concurrency, llap_concurreny_limit, llap_mem_for_tezAm_and_daemons, DEFAULT_EXECUTOR_TO_AM_RATIO,
                                  mem_per_thread_for_llap, normalized_tez_am_container_size))
       if llap_concurrency == 0:
         llap_concurrency = 1
-        Logger.info("DBG: Readjusted 'llap_concurrency' to : 1. Earlier calculated value : 0")
+        self.logger.info("DBG: Readjusted 'llap_concurrency' to : 1. Earlier calculated value : 0")
 
       if llap_concurrency * normalized_tez_am_container_size > hive_tez_am_cap_available:
         llap_concurrency = long(math.floor(hive_tez_am_cap_available / normalized_tez_am_container_size))
-        Logger.info("DBG: Readjusted 'llap_concurrency' to : {0}, as llap_concurrency({1}) * normalized_tez_am_container_size({2}) > hive_tez_am_cap_available({3}))"
+        self.logger.info("DBG: Readjusted 'llap_concurrency' to : {0}, as llap_concurrency({1}) * normalized_tez_am_container_size({2}) > hive_tez_am_cap_available({3}))"
                     .format(llap_concurrency, llap_concurrency, normalized_tez_am_container_size, hive_tez_am_cap_available))
 
         if llap_concurrency <= 0:
-          Logger.warning("DBG: Calculated 'LLAP Concurrent Queries' = {0}. Expected value >= 1.".format(llap_concurrency))
+          self.logger.warning("DBG: Calculated 'LLAP Concurrent Queries' = {0}. Expected value >= 1.".format(llap_concurrency))
           self.recommendDefaultLlapConfiguration(configurations, services, hosts)
           return
-        Logger.info("DBG: Adjusted 'llap_concurrency' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
+        self.logger.info("DBG: Adjusted 'llap_concurrency' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
                       "{2}".format(llap_concurrency, hive_tez_am_cap_available, normalized_tez_am_container_size))
     else:
       # Read current value
       if 'hive.server2.tez.sessions.per.default.queue' in hsi_site:
         llap_concurrency = long(hsi_site['hive.server2.tez.sessions.per.default.queue'])
         if llap_concurrency <= 0:
-          Logger.warning("'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1".format(llap_concurrency))
+          self.logger.warning("'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1".format(llap_concurrency))
           self.recommendDefaultLlapConfiguration(configurations, services, hosts)
           return
-        Logger.info("DBG: Read 'llap_concurrency' : {0}".format(llap_concurrency ))
+        self.logger.info("DBG: Read 'llap_concurrency' : {0}".format(llap_concurrency ))
       else:
         llap_concurrency = 1
-        Logger.warning("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config. Setting default value 1.")
+        self.logger.warning("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config. Setting default value 1.")
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
     # Calculate 'Max LLAP Concurrency', irrespective of whether 'llap_concurrency' was read or calculated.
     max_llap_concurreny_limit = min(math.floor(max_executors_per_node * num_llap_nodes_requested / MIN_EXECUTOR_TO_AM_RATIO), MAX_CONCURRENT_QUERIES)
-    Logger.info("DBG: Calculated 'max_llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested "
+    self.logger.info("DBG: Calculated 'max_llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested "
                   ": {2}, MIN_EXECUTOR_TO_AM_RATIO : {3}, MAX_CONCURRENT_QUERIES : {4}".format(max_llap_concurreny_limit, max_executors_per_node,
                                                                                                num_llap_nodes_requested, MIN_EXECUTOR_TO_AM_RATIO,
                                                                                                MAX_CONCURRENT_QUERIES))
     max_llap_concurreny = long(min(max_llap_concurreny_limit, math.floor(llap_mem_for_tezAm_and_daemons / (MIN_EXECUTOR_TO_AM_RATIO *
                                                                                                       mem_per_thread_for_llap + normalized_tez_am_container_size))))
-    Logger.info("DBG: Calculated 'max_llap_concurreny' : {0}, using following : max_llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
+    self.logger.info("DBG: Calculated 'max_llap_concurreny' : {0}, using following : max_llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
                   "{2}, MIN_EXECUTOR_TO_AM_RATIO : {3}, mem_per_thread_for_llap : {4}, normalized_tez_am_container_size : "
                   "{5}".format(max_llap_concurreny, max_llap_concurreny_limit, llap_mem_for_tezAm_and_daemons, MIN_EXECUTOR_TO_AM_RATIO,
                                mem_per_thread_for_llap, normalized_tez_am_container_size))
     if int(max_llap_concurreny) < MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS:
-      Logger.info("DBG: Adjusting 'max_llap_concurreny' from {0} to {1}".format(max_llap_concurreny, MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS))
+      self.logger.info("DBG: Adjusting 'max_llap_concurreny' from {0} to {1}".format(max_llap_concurreny, MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS))
       max_llap_concurreny = MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS
 
     if (max_llap_concurreny * normalized_tez_am_container_size) > hive_tez_am_cap_available:
       max_llap_concurreny = math.floor(hive_tez_am_cap_available / normalized_tez_am_container_size)
       if max_llap_concurreny <= 0:
-        Logger.warning("Calculated 'Max. LLAP Concurrent Queries' = {0}. Expected value > 1".format(max_llap_concurreny))
+        self.logger.warning("Calculated 'Max. LLAP Concurrent Queries' = {0}. Expected value > 1".format(max_llap_concurreny))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
-      Logger.info("DBG: Adjusted 'max_llap_concurreny' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
+      self.logger.info("DBG: Adjusted 'max_llap_concurreny' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
                     "{2}".format(max_llap_concurreny, hive_tez_am_cap_available, normalized_tez_am_container_size))
 
     # Calculate value for 'num_llap_nodes', an across cluster config.
     tez_am_memory_required = llap_concurrency * normalized_tez_am_container_size
-    Logger.info("DBG: Calculated 'tez_am_memory_required' : {0}, using following : llap_concurrency : {1}, normalized_tez_am_container_size : "
+    self.logger.info("DBG: Calculated 'tez_am_memory_required' : {0}, using following : llap_concurrency : {1}, normalized_tez_am_container_size : "
                   "{2}".format(tez_am_memory_required, llap_concurrency, normalized_tez_am_container_size))
     llap_mem_daemon_size = llap_mem_for_tezAm_and_daemons - tez_am_memory_required
 
     if llap_mem_daemon_size < yarn_min_container_size:
-      Logger.warning("Calculated 'LLAP Daemon Size = {0}'. Expected >= 'YARN Minimum Container Size' ({1})'".format(
+      self.logger.warning("Calculated 'LLAP Daemon Size = {0}'. Expected >= 'YARN Minimum Container Size' ({1})'".format(
         llap_mem_daemon_size, yarn_min_container_size))
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
 
     if llap_mem_daemon_size < mem_per_thread_for_llap or llap_mem_daemon_size < yarn_min_container_size:
-      Logger.warning("Not enough memory available for executors.")
+      self.logger.warning("Not enough memory available for executors.")
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG: Calculated 'llap_mem_daemon_size' : {0}, using following : llap_mem_for_tezAm_and_daemons : {1}, tez_am_memory_required : "
+    self.logger.info("DBG: Calculated 'llap_mem_daemon_size' : {0}, using following : llap_mem_for_tezAm_and_daemons : {1}, tez_am_memory_required : "
                   "{2}".format(llap_mem_daemon_size, llap_mem_for_tezAm_and_daemons, tez_am_memory_required))
 
     llap_daemon_mem_per_node = self._normalizeDown(llap_mem_daemon_size / num_llap_nodes_requested, yarn_min_container_size)
-    Logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
+    self.logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
                   "yarn_min_container_size: {3}".format(llap_daemon_mem_per_node, llap_mem_daemon_size, num_llap_nodes_requested, yarn_min_container_size))
     if llap_daemon_mem_per_node == 0:
       # Small cluster. No capacity left on a node after running AMs.
       llap_daemon_mem_per_node = self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
       num_llap_nodes = math.floor(llap_mem_daemon_size / llap_daemon_mem_per_node)
-      Logger.info("DBG: 'llap_daemon_mem_per_node' : 0, adjusted 'llap_daemon_mem_per_node' : {0}, 'num_llap_nodes' : {1}, using following: llap_mem_daemon_size : {2}, "
+      self.logger.info("DBG: 'llap_daemon_mem_per_node' : 0, adjusted 'llap_daemon_mem_per_node' : {0}, 'num_llap_nodes' : {1}, using following: llap_mem_daemon_size : {2}, "
                     "mem_per_thread_for_llap : {3}".format(llap_daemon_mem_per_node, num_llap_nodes, llap_mem_daemon_size, mem_per_thread_for_llap))
     elif llap_daemon_mem_per_node < mem_per_thread_for_llap:
       # Previously computed value of memory per thread may be too high. Cut the number of nodes. (Alternately reduce memory per node)
       llap_daemon_mem_per_node = mem_per_thread_for_llap
       num_llap_nodes = math.floor(llap_mem_daemon_size / mem_per_thread_for_llap)
-      Logger.info("DBG: 'llap_daemon_mem_per_node'({0}) < mem_per_thread_for_llap({1}), adjusted 'llap_daemon_mem_per_node' "
+      self.logger.info("DBG: 'llap_daemon_mem_per_node'({0}) < mem_per_thread_for_llap({1}), adjusted 'llap_daemon_mem_per_node' "
                     ": {2}".format(llap_daemon_mem_per_node, mem_per_thread_for_llap, llap_daemon_mem_per_node))
     else:
       # All good. We have a proper value for memoryPerNode.
       num_llap_nodes = num_llap_nodes_requested
-      Logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
+      self.logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
 
     num_executors_per_node_max = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap)
     if num_executors_per_node_max < 1:
-      Logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
+      self.logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG: Calculated 'num_executors_per_node_max' : {0}, using following : yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
+    self.logger.info("DBG: Calculated 'num_executors_per_node_max' : {0}, using following : yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
                   "mem_per_thread_for_llap: {3}".format(num_executors_per_node_max, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap))
 
     # NumExecutorsPerNode is not necessarily max - since some capacity would have been reserved for AMs, if this value were based on mem.
     num_executors_per_node = min(math.floor(llap_daemon_mem_per_node / mem_per_thread_for_llap), num_executors_per_node_max)
     if num_executors_per_node <= 0:
-      Logger.warning("Calculated 'Number of Executors Per Node' = {0}. Expected value >= 1".format(num_executors_per_node))
+      self.logger.warning("Calculated 'Number of Executors Per Node' = {0}. Expected value >= 1".format(num_executors_per_node))
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG: Calculated 'num_executors_per_node' : {0}, using following : llap_daemon_mem_per_node : {1}, num_executors_per_node_max : {2}, "
+    self.logger.info("DBG: Calculated 'num_executors_per_node' : {0}, using following : llap_daemon_mem_per_node : {1}, num_executors_per_node_max : {2}, "
                   "mem_per_thread_for_llap: {3}".format(num_executors_per_node, llap_daemon_mem_per_node, num_executors_per_node_max, mem_per_thread_for_llap))
 
     # Now figure out how much of the memory will be used by the executors, and how much will be used by the cache.
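
Pulling the concurrency and daemon-size formulas from this hunk together, a worked example with invented inputs (the constants' values here are illustrative, not the stack defaults):

    import math

    DEFAULT_EXECUTOR_TO_AM_RATIO, MAX_CONCURRENT_QUERIES = 20, 32
    yarn_min_container_size = 1024.0
    mem_per_thread_for_llap = 4096.0
    normalized_tez_am_container_size = 4096
    slider_am_container_size = 1024
    num_llap_nodes_requested = 8
    max_executors_per_node = 12
    total_llap_mem_normalized = 8 * 14336.0                                  # 114688 MB

    llap_mem_for_tezAm_and_daemons = total_llap_mem_normalized - slider_am_container_size
    llap_concurreny_limit = min(math.floor(max_executors_per_node * num_llap_nodes_requested /
                                           DEFAULT_EXECUTOR_TO_AM_RATIO),
                                MAX_CONCURRENT_QUERIES)                      # 4
    llap_concurrency = min(llap_concurreny_limit,
                           math.floor(llap_mem_for_tezAm_and_daemons /
                                      (DEFAULT_EXECUTOR_TO_AM_RATIO * mem_per_thread_for_llap +
                                       normalized_tez_am_container_size)))   # 1
    tez_am_memory_required = llap_concurrency * normalized_tez_am_container_size  # 4096.0
    llap_mem_daemon_size = llap_mem_for_tezAm_and_daemons - tez_am_memory_required  # 109568.0
    llap_daemon_mem_per_node = math.floor(llap_mem_daemon_size / num_llap_nodes_requested /
                                          yarn_min_container_size) * yarn_min_container_size  # 13312.0
    num_executors_per_node = math.floor(llap_daemon_mem_per_node / mem_per_thread_for_llap)   # 3.0
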
@@ -1200,7 +1199,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     # Calculate value for prop 'llap_heap_size'
     llap_xmx = max(total_mem_for_executors_per_node * 0.8, total_mem_for_executors_per_node - self.get_llap_headroom_space(services, configurations))
-    Logger.info("DBG: Calculated llap_app_heap_size : {0}, using following : total_mem_for_executors : {1}".format(llap_xmx, total_mem_for_executors_per_node))
+    self.logger.info("DBG: Calculated llap_app_heap_size : {0}, using following : total_mem_for_executors : {1}".format(llap_xmx, total_mem_for_executors_per_node))
 
     # Calculate 'hive_heapsize' for Hive2/HiveServer2 (HSI)
     hive_server_interactive_heapsize = None
@@ -1211,16 +1210,16 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     if hive_server_interactive_hosts is not None and len(hive_server_interactive_hosts) > 0:
       host_mem = long(hive_server_interactive_hosts[0]["Hosts"]["total_mem"])
       hive_server_interactive_heapsize = min(max(2048.0, 400.0*llap_concurrency), 3.0/8 * host_mem)
-      Logger.info("DBG: Calculated 'hive_server_interactive_heapsize' : {0}, using following : llap_concurrency : {1}, host_mem : "
+      self.logger.info("DBG: Calculated 'hive_server_interactive_heapsize' : {0}, using following : llap_concurrency : {1}, host_mem : "
                     "{2}".format(hive_server_interactive_heapsize, llap_concurrency, host_mem))
 
     # Done with calculations, updating calculated configs.
-    Logger.info("DBG: Applying the calculated values....")
+    self.logger.info("DBG: Applying the calculated values....")
 
     if is_cluster_create_opr or changed_configs_has_enable_hive_int:
       normalized_tez_am_container_size = long(normalized_tez_am_container_size)
       putTezInteractiveSiteProperty('tez.am.resource.memory.mb', normalized_tez_am_container_size)
-      Logger.info("DBG: Setting 'tez.am.resource.memory.mb' config value as : {0}".format(normalized_tez_am_container_size))
+      self.logger.info("DBG: Setting 'tez.am.resource.memory.mb' config value as : {0}".format(normalized_tez_am_container_size))
 
     if not llap_concurrency_in_changed_configs:
       min_llap_concurrency = 1
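
The HiveServer2 Interactive heap sizing at the top of this hunk, evaluated with hypothetical numbers:

    llap_concurrency, host_mem = 8, 65536  # 64 GB host, invented values
    hive_server_interactive_heapsize = min(max(2048.0, 400.0 * llap_concurrency),
                                           3.0 / 8 * host_mem)
    # min(max(2048.0, 3200.0), 24576.0) == 3200.0 MB
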
@@ -1236,16 +1235,16 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", node_manager_cnt)
     #TODO A single value is not being set for numNodes in case of a custom queue. Also the attribute is set to non-visible, so the UI likely ends up using an old cached value
     if (num_llap_nodes != num_llap_nodes_requested):
-      Logger.info("DBG: User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes))
+      self.logger.info("DBG: User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes))
     else:
-      Logger.info("DBG: Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested))
+      self.logger.info("DBG: Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested))
 
     # Safeguard for not adding "num_llap_nodes_for_llap_daemons" if it doesn't exist in hive-interactive-site.
     # This can happen if we upgrade from Ambari 2.4 (with HDP 2.5) to Ambari 2.5, as this config is from 2.6 stack onwards only.
     if "hive-interactive-env" in services["configurations"] and \
         "num_llap_nodes_for_llap_daemons" in services["configurations"]["hive-interactive-env"]["properties"]:
       putHiveInteractiveEnvProperty('num_llap_nodes_for_llap_daemons', num_llap_nodes)
-      Logger.info("DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {0}".format(num_llap_nodes))
+      self.logger.info("DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {0}".format(num_llap_nodes))
 
     llap_container_size = long(llap_daemon_mem_per_node)
     putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size)
@@ -1255,7 +1254,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     if is_cluster_create_opr or changed_configs_has_enable_hive_int:
       mem_per_thread_for_llap = long(mem_per_thread_for_llap)
       putHiveInteractiveSiteProperty('hive.tez.container.size', mem_per_thread_for_llap)
-      Logger.info("DBG: Setting 'hive.tez.container.size' config value as : {0}".format(mem_per_thread_for_llap))
+      self.logger.info("DBG: Setting 'hive.tez.container.size' config value as : {0}".format(mem_per_thread_for_llap))
 
 
     putTezInteractiveSiteProperty('tez.runtime.io.sort.mb', tez_runtime_io_sort_mb)
@@ -1267,7 +1266,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     putHiveInteractiveSiteProperty('hive.auto.convert.join.noconditionaltask.size', hive_auto_convert_join_noconditionaltask_size)
 
     num_executors_per_node = long(num_executors_per_node)
-    Logger.info("DBG: Putting num_executors_per_node as {0}".format(num_executors_per_node))
+    self.logger.info("DBG: Putting num_executors_per_node as {0}".format(num_executors_per_node))
     putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', num_executors_per_node)
     putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1)
     putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "maximum", long(num_executors_per_node_max))
@@ -1287,11 +1286,11 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     putHiveInteractiveEnvProperty('llap_heap_size', long(llap_xmx))
     putHiveInteractiveEnvProperty('slider_am_container_mb', long(slider_am_container_size))
-    Logger.info("DBG: Done putting all configs")
+    self.logger.info("DBG: Done putting all configs")
 
   #TODO: What is this doing? What error will be displayed on the UI if something like this is hit?
   def recommendDefaultLlapConfiguration(self, configurations, services, hosts):
-    Logger.info("DBG: Something likely went wrong. recommendDefaultLlapConfiguration")
+    self.logger.info("DBG: Something likely went wrong. recommendDefaultLlapConfiguration")
     putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
     putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
 
@@ -1344,7 +1343,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     elif hsi_env and 'num_llap_nodes' in hsi_env:
       num_llap_nodes = hsi_env['num_llap_nodes']
     else:
-      Logger.error("Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {0}".format(num_llap_nodes))
+      self.logger.error("Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {0}".format(num_llap_nodes))
 
     return float(num_llap_nodes)
 
@@ -1370,10 +1369,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       else:
         calculated_hive_tez_container_size = 4096
 
-      Logger.info("DBG: Calculated and returning 'hive_tez_container_size' : {0}".format(calculated_hive_tez_container_size))
+      self.logger.info("DBG: Calculated and returning 'hive_tez_container_size' : {0}".format(calculated_hive_tez_container_size))
       return calculated_hive_tez_container_size
     else:
-      Logger.info("DBG: Returning 'hive_tez_container_size' : {0}".format(hive_tez_container_size))
+      self.logger.info("DBG: Returning 'hive_tez_container_size' : {0}".format(hive_tez_container_size))
       return hive_tez_container_size
 
   def get_hive_tez_container_size(self, services):
@@ -1406,16 +1405,16 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     # Check if 'llap_headroom_space' is modified in current SA invocation.
     if 'hive-interactive-env' in configurations and 'llap_headroom_space' in configurations['hive-interactive-env']['properties']:
       llap_headroom_space = float(configurations['hive-interactive-env']['properties']['llap_headroom_space'])
-      Logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space))
+      self.logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space))
 
     if llap_headroom_space is None:
       # Check if 'llap_headroom_space' is input in services array.
       if 'llap_headroom_space' in services['configurations']['hive-interactive-env']['properties']:
         llap_headroom_space = float(services['configurations']['hive-interactive-env']['properties']['llap_headroom_space'])
-        Logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space))
+        self.logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space))
     if not llap_headroom_space or llap_headroom_space < 1:
       llap_headroom_space = 6144 # 6GB
-      Logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes")
+      self.logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes")
 
     return llap_headroom_space
 
@@ -1444,15 +1443,15 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     # Check if services["changed-configurations"] is empty and 'yarn.scheduler.minimum-allocation-mb' is modified in the current SA invocation.
     if not services["changed-configurations"] and yarn_site and yarn_min_allocation_property in yarn_site:
       yarn_min_container_size = yarn_site[yarn_min_allocation_property]
-      Logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size))
+      self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size))
 
     # Check if 'yarn.scheduler.minimum-allocation-mb' is input in services array.
     elif yarn_site_properties and yarn_min_allocation_property in yarn_site_properties:
       yarn_min_container_size = yarn_site_properties[yarn_min_allocation_property]
-      Logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size))
+      self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size))
 
     if not yarn_min_container_size:
-      Logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property))
+      self.logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property))
 
     return yarn_min_container_size
 
@@ -1482,10 +1481,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       elif total_cluster_capacity > 98304:
         calculated_tez_am_resource_memory_mb = 4096
 
-      Logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb))
+      self.logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb))
       return float(calculated_tez_am_resource_memory_mb)
     else:
-      Logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb))
+      self.logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb))
       return float(tez_am_resource_memory_mb)
 
   def get_tez_am_resource_memory_mb(self, services):
@@ -1544,7 +1543,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
                (2). Updates 'llap' queue capacity and state, if current selected queue is 'llap', and only 2 queues exist
                     at root level : 'default' and 'llap'.
     """
-    Logger.info("Determining creation/adjustment of 'capacity-scheduler' for 'llap' queue.")
+    self.logger.info("Determining creation/adjustment of 'capacity-scheduler' for 'llap' queue.")
     putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
     putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
     putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
@@ -1629,7 +1628,7 @@ yarn.scheduler.capacity.root.{0}.acl_administer_queue={2}
 yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_queue_name, llap_queue_cap_perc, hive_user)
 
           putCapSchedProperty("capacity-scheduler", updated_cap_sched_configs_str)
-          Logger.info("Updated 'capacity-scheduler' configs as one concatenated string.")
+          self.logger.info("Updated 'capacity-scheduler' configs as one concatenated string.")
         else:
           # If capacity-scheduler configs are received as a  dictionary (generally 1st time), we deposit the changed
           # values back as dictionary itself.
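
For reference, the visible tail of the concatenated template renders like this (llap_queue_name='llap', llap_queue_cap_perc=20, hive_user='hive' are example values):

    tail = ("yarn.scheduler.capacity.root.{0}.acl_administer_queue={2}\n"
            "yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1"
            ).format('llap', 20, 'hive')
    # yarn.scheduler.capacity.root.llap.acl_administer_queue=hive
    # yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1
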
@@ -1661,16 +1660,16 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
           putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".acl_administer_queue", hive_user)
           putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".maximum-am-resource-percent", "1")
 
-          Logger.info("Updated 'capacity-scheduler' configs as a dictionary.")
+          self.logger.info("Updated 'capacity-scheduler' configs as a dictionary.")
           updated_cap_sched_configs_as_dict = True
 
         if updated_cap_sched_configs_str or updated_cap_sched_configs_as_dict:
           if len(leafQueueNames) == 1: # 'llap' queue didn't exist before
-            Logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted 'default' queue capacity to : {2}%" \
+            self.logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted 'default' queue capacity to : {2}%" \
                       .format(llap_queue_name, llap_queue_cap_perc, adjusted_default_queue_cap))
           else: # Queue existed, only adjustments done.
-            Logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_queue_cap_perc))
-            Logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap))
+            self.logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_queue_cap_perc))
+            self.logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap))
 
           # Update Hive 'hive.llap.daemon.queue.name' prop to use 'llap' queue.
           putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', llap_queue_name)
@@ -1678,9 +1677,9 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
           # Update 'hive.llap.daemon.queue.name' prop combo entries and llap capacity slider visibility.
           self.setLlapDaemonQueuePropAttributes(services, configurations)
       else:
-        Logger.debug("Not creating/adjusting {0} queue. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
+        self.logger.debug("Not creating/adjusting {0} queue. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
     else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
+      self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
 
   """
   Checks and sees (1). If only two leaf queues exist at root level, namely: 'default' and 'llap',
@@ -1706,7 +1705,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
         if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state' in capacity_scheduler_properties.keys():
           currLlapQueueState = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.state')
         else:
-          Logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name))
+          self.logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name))
           return
         if currLlapQueueState == 'RUNNING':
           DEFAULT_MAX_CAPACITY = '100'
@@ -1739,28 +1738,28 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
               elif prop.startswith('yarn.'):
                 updated_llap_queue_configs = updated_llap_queue_configs + prop + "=" + val + "\n"
         else:
-          Logger.debug("{0} queue state is : {1}. Skipping adjusting queues.".format(llap_queue_name, currLlapQueueState))
+          self.logger.debug("{0} queue state is : {1}. Skipping adjusting queues.".format(llap_queue_name, currLlapQueueState))
           return
 
         if updated_default_queue_configs and updated_llap_queue_configs:
           putCapSchedProperty("capacity-scheduler", updated_default_queue_configs+updated_llap_queue_configs)
-          Logger.info("Changed YARN '{0}' queue state to 'STOPPED', and capacity to 0%. Adjusted 'default' queue capacity to : {1}%" \
+          self.logger.info("Changed YARN '{0}' queue state to 'STOPPED', and capacity to 0%. Adjusted 'default' queue capacity to : {1}%" \
             .format(llap_queue_name, DEFAULT_MAX_CAPACITY))
 
           # Update Hive 'hive.llap.daemon.queue.name' prop to use 'default' queue.
           putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', self.YARN_ROOT_DEFAULT_QUEUE_NAME)
           putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', self.YARN_ROOT_DEFAULT_QUEUE_NAME)
       else:
-        Logger.debug("Not removing '{0}' queue as number of Queues not equal to 2. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
+        self.logger.debug("Not removing '{0}' queue as number of Queues not equal to 2. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
     else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
+      self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
 
   def setLlapDaemonQueuePropAttributes(self, services, configurations):
     """
     Checks and sets the 'Hive Server Interactive' 'hive.llap.daemon.queue.name' config Property Attributes.  Takes into
     account that 'capacity-scheduler' may have changed (got updated) in current Stack Advisor invocation.
     """
-    Logger.info("Determining 'hive.llap.daemon.queue.name' config Property Attributes.")
+    self.logger.info("Determining 'hive.llap.daemon.queue.name' config Property Attributes.")
     #TODO Determine if this is doing the right thing if some queue is setup with capacity=0, or is STOPPED. Maybe don't list it.
     putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
 
@@ -1779,31 +1778,31 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
               for property in cap_sched_props_as_str:
                 key, sep, value = property.partition("=")
                 capacity_scheduler_properties[key] = value
-              Logger.info("'capacity-scheduler' configs is set as a single '\\n' separated string in current invocation. "
+              self.logger.info("'capacity-scheduler' configs are set as a single '\\n'-separated string in the current invocation. "
                           "count(configurations['capacity-scheduler']['properties']['capacity-scheduler']) = "
                           "{0}".format(len(capacity_scheduler_properties)))
             else:
-              Logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str))
+              self.logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str))
           else:
-            Logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str))
+            self.logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str))
 
         # If 'capacity_scheduler_properties' is empty, we may have the 'capacity-scheduler' configs as a dictionary
         # in configurations, if 'capacity-scheduler' changed in the current invocation.
         if not capacity_scheduler_properties:
           if isinstance(cap_sched_props_as_dict, dict) and len(cap_sched_props_as_dict) > 1:
             capacity_scheduler_properties = cap_sched_props_as_dict
-            Logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. Retrieved the configs as dictionary from configurations.")
+            self.logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. Retrieved the configs as dictionary from configurations.")
           else:
-            Logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict))
+            self.logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict))
     else:
-      Logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.")
+      self.logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.")
 
 
     # if 'capacity_scheduler_properties' is still empty, it implies 'capacity-scheduler' wasn't changed in the current
     # SA invocation. Thus, read it from input : 'services'.
     if not capacity_scheduler_properties:
       capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
-      Logger.info("'capacity-scheduler' not changed in current Stack Advisor invocation. Retrieved the configs from services.")
+      self.logger.info("'capacity-scheduler' not changed in current Stack Advisor invocation. Retrieved the configs from services.")
 
     # Get set of current YARN leaf queues.
     leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
@@ -1811,9 +1810,9 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
       leafQueues = [{"label": str(queueName), "value": queueName} for queueName in leafQueueNames]
       leafQueues = sorted(leafQueues, key=lambda q: q['value'])
       putHiveInteractiveSitePropertyAttribute("hive.llap.daemon.queue.name", "entries", leafQueues)
-      Logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues))
+      self.logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues))
     else:
-      Logger.error("Problem retrieving YARN queues. Skipping updating HIVE Server Interactve "
+      self.logger.error("Problem retrieving YARN queues. Skipping updating HIVE Server Interactve "
                    "'hive.server2.tez.default.queues' property attributes.")
 
   def __getQueueCapacityKeyFromCapacityScheduler(self, capacity_scheduler_properties, llap_daemon_selected_queue_name):
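
The setLlapDaemonQueuePropAttributes hunk above rebuilds the 'capacity-scheduler' dictionary from one '\n'-separated string via str.partition. A standalone sketch of that parsing, assuming the blob is split on newlines first, as the surrounding code implies; the sample text is hypothetical.

    def parse_capacity_scheduler_blob(blob):
        # Split the single newline-separated string into key=value pairs.
        # partition('=') keeps everything after the first '=' as the value.
        props = {}
        for line in blob.split('\n'):
            key, sep, value = line.partition('=')
            props[key] = value
        return props

    sample = ("yarn.scheduler.capacity.root.queues=default,llap\n"
              "yarn.scheduler.capacity.root.llap.capacity=20")
    print(parse_capacity_scheduler_blob(sample)["yarn.scheduler.capacity.root.llap.capacity"])  # '20'
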
@@ -1827,7 +1826,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     for key in cap_sched_keys:
       # Expected capacity prop key is of form : 'yarn.scheduler.capacity.<one or more queues in path separated by '.'>.[llap_daemon_selected_queue_name].capacity'
       if key.endswith(llap_daemon_selected_queue_name+".capacity") and key.startswith("yarn.scheduler.capacity.root"):
-        Logger.info("DBG: Selected queue name as: " + key)
+        self.logger.info("DBG: Selected queue name as: " + key)
         llap_selected_queue_cap_key = key
         break
     return llap_selected_queue_cap_key
@@ -1858,14 +1857,14 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     for key in cap_sched_keys:
       if key.endswith("."+llap_daemon_selected_queue_name+".maximum-am-resource-percent"):
         llap_selected_queue_am_percent_key = key
-        Logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key))
+        self.logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key))
         break
     if llap_selected_queue_am_percent_key is None:
-      Logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name))
+      self.logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name))
       return 0.1 # Default value to use if we couldn't retrieve queue's corresponding AM Percent key.
     else:
       llap_selected_queue_am_percent = capacity_scheduler_properties.get(llap_selected_queue_am_percent_key)
-      Logger.info("Returning read value for key '{0}' as : '{1}' for queue : '{2}'".format(llap_selected_queue_am_percent_key,
+      self.logger.info("Returning read value for key '{0}' as : '{1}' for queue : '{2}'".format(llap_selected_queue_am_percent_key,
                                                                                      llap_selected_queue_am_percent,
                                                                                      llap_daemon_selected_queue_name))
       return llap_selected_queue_am_percent
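
A compact sketch of the AM-percent lookup above. The key suffix and the 0.1 default come from the hunk; returning float is a small normalization, since the diffed code hands back the raw string value.

    def queue_am_percent(cap_sched_props, queue_name):
        # Search for '<...>.<queue_name>.maximum-am-resource-percent' and fall
        # back to 0.1 when the queue has no explicit AM percent key.
        suffix = "." + queue_name + ".maximum-am-resource-percent"
        for key in cap_sched_props:
            if key.endswith(suffix):
                return float(cap_sched_props[key])
        return 0.1

    props = {"yarn.scheduler.capacity.root.llap.maximum-am-resource-percent": "0.25"}
    print(queue_am_percent(props, "llap"))     # 0.25
    print(queue_am_percent(props, "default"))  # 0.1
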
@@ -1874,7 +1873,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     """
     Calculates the total available capacity for the passed-in YARN queue of any level based on the percentages.
     """
-    Logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name))
+    self.logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name))
     available_capacity = total_cluster_capacity
     queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
     if queue_cap_key:
@@ -1884,13 +1883,13 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
         queue_path = queue_cap_key[24:]  # Strip from beginning 'yarn.scheduler.capacity.'
         queue_path = queue_path[0:-9]  # Strip from end '.capacity'
         queues_list = queue_path.split('.')
-        Logger.info("Queue list : {0}".format(queues_list))
+        self.logger.info("Queue list : {0}".format(queues_list))
         if queues_list:
           for queue in queues_list:
             queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, queue)
             queue_cap_perc = float(capacity_scheduler_properties.get(queue_cap_key))
             available_capacity = queue_cap_perc / 100 * available_capacity
-            Logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity))
+            self.logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity))
 
     # returns the capacity calculated for passed-in queue in 'llap_daemon_selected_queue_name'.
     return available_capacity
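
A self-contained sketch of the capacity walk in __getSelectedQueueTotalCap above. The key matching and the 24/9-character slicing mirror the hunks; the sample properties and numbers are hypothetical.

    def find_queue_capacity_key(cap_sched_props, queue_name):
        # Same matching rule as __getQueueCapacityKeyFromCapacityScheduler.
        for key in cap_sched_props:
            if key.startswith("yarn.scheduler.capacity.root") and \
                    key.endswith(queue_name + ".capacity"):
                return key
        return None

    def selected_queue_total_cap(cap_sched_props, queue_name, total_cluster_capacity):
        # Walk the queue path (e.g. 'root.llap') and compound each level's
        # capacity percentage against the cluster total.
        available = total_cluster_capacity
        cap_key = find_queue_capacity_key(cap_sched_props, queue_name)
        if cap_key:
            queue_path = cap_key[24:-9]  # strip 'yarn.scheduler.capacity.' and '.capacity'
            for queue in queue_path.split('.'):
                key = find_queue_capacity_key(cap_sched_props, queue)
                available = float(cap_sched_props[key]) / 100 * available
        return available

    props = {"yarn.scheduler.capacity.root.capacity": "100",
             "yarn.scheduler.capacity.root.llap.capacity": "40"}
    print(selected_queue_total_cap(props, "llap", 10240))  # 4096.0 of a 10240 MB cluster
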

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 53ff007..7881917 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -16,7 +16,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 """
-from resource_management.core.logger import Logger
 import json
 import re
 from resource_management.libraries.functions import format
@@ -25,7 +24,7 @@ from resource_management.libraries.functions import format
 class HDP26StackAdvisor(HDP25StackAdvisor):
   def __init__(self):
       super(HDP26StackAdvisor, self).__init__()
-      Logger.initialize_logger()
+      self.initialize_logger("HDP26StackAdvisor")
 
   def getServiceConfigurationRecommenderDict(self):
       parentRecommendConfDict = super(HDP26StackAdvisor, self).getServiceConfigurationRecommenderDict()
@@ -181,11 +180,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
         putYarnSiteProperty('yarn.log.server.web-service.url',webservice_url )
 
     if ranger_yarn_plugin_enabled and 'ranger-yarn-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
-      Logger.info("Setting Yarn Repo user for Ranger.")
+      self.logger.info("Setting Yarn Repo user for Ranger.")
       putRangerYarnPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
       putRangerYarnPluginProperty("REPOSITORY_CONFIG_USERNAME",yarn_user)
     else:
-      Logger.info("Not setting Yarn Repo user for Ranger.")
+      self.logger.info("Not setting Yarn Repo user for Ranger.")
 
   def getMetadataConnectionString(self, database_type):
       driverDict = {
@@ -282,7 +281,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       else:
          webapp_address = services["configurations"]["yarn-site"]["properties"]["yarn.timeline-service.webapp.https.address"]
          propertyValue = "https://"+webapp_address+"/ws/v1/applicationhistory"
-      Logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
+      self.logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
       if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue:
          validationItems = [
               {"config-name": "yarn.log.server.web-service.url",
@@ -325,7 +324,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     tez_jvm_updated_opts = tez_jvm_opts + jvmGCParams + "{{heap_dump_opts}}"
     putTezProperty('tez.am.launch.cmd-opts', tez_jvm_updated_opts)
     putTezProperty('tez.task.launch.cmd-opts', tez_jvm_updated_opts)
-    Logger.info("Updated 'tez-site' config 'tez.task.launch.cmd-opts' and 'tez.am.launch.cmd-opts' as "
+    self.logger.info("Updated 'tez-site' config 'tez.task.launch.cmd-opts' and 'tez.am.launch.cmd-opts' as "
                 ": {0}".format(tez_jvm_updated_opts))
 
   def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
@@ -390,11 +389,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       ranger_hdfs_plugin_enabled = False
 
     if ranger_hdfs_plugin_enabled and 'ranger-hdfs-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
-      Logger.info("Setting HDFS Repo user for Ranger.")
+      self.logger.info("Setting HDFS Repo user for Ranger.")
       putRangerHDFSPluginProperty = self.putProperty(configurations, "ranger-hdfs-plugin-properties", services)
       putRangerHDFSPluginProperty("REPOSITORY_CONFIG_USERNAME",hdfs_user)
     else:
-      Logger.info("Not setting HDFS Repo user for Ranger.")
+      self.logger.info("Not setting HDFS Repo user for Ranger.")
 
   def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
@@ -411,11 +410,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       ranger_hive_plugin_enabled = False
 
     if ranger_hive_plugin_enabled and 'ranger-hive-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hive-plugin-properties']['properties']:
-      Logger.info("Setting Hive Repo user for Ranger.")
+      self.logger.info("Setting Hive Repo user for Ranger.")
       putRangerHivePluginProperty = self.putProperty(configurations, "ranger-hive-plugin-properties", services)
       putRangerHivePluginProperty("REPOSITORY_CONFIG_USERNAME",hive_user)
     else:
-      Logger.info("Not setting Hive Repo user for Ranger.")
+      self.logger.info("Not setting Hive Repo user for Ranger.")
 
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
@@ -432,11 +431,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       ranger_hbase_plugin_enabled = False
 
     if ranger_hbase_plugin_enabled and 'ranger-hbase-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
-      Logger.info("Setting Hbase Repo user for Ranger.")
+      self.logger.info("Setting Hbase Repo user for Ranger.")
       putRangerHbasePluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
       putRangerHbasePluginProperty("REPOSITORY_CONFIG_USERNAME",hbase_user)
     else:
-      Logger.info("Not setting Hbase Repo user for Ranger.")
+      self.logger.info("Not setting Hbase Repo user for Ranger.")
 
   def recommendKAFKAConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendKAFKAConfigurations(configurations, clusterData, services, hosts)
@@ -453,8 +452,8 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       ranger_kafka_plugin_enabled = False
 
     if ranger_kafka_plugin_enabled and 'ranger-kafka-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
-      Logger.info("Setting Kafka Repo user for Ranger.")
+      self.logger.info("Setting Kafka Repo user for Ranger.")
       putRangerKafkaPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
       putRangerKafkaPluginProperty("REPOSITORY_CONFIG_USERNAME",kafka_user)
     else:
-      Logger.info("Not setting Kafka Repo user for Ranger.")
+      self.logger.info("Not setting Kafka Repo user for Ranger.")

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
index c7d9327..c1dcce8 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
@@ -24,7 +24,6 @@ import os
 from math import ceil
 
 # Local Imports
-from resource_management.core.logger import Logger
 from stack_advisor import DefaultStackAdvisor
 
 class HDPWIN21StackAdvisor(DefaultStackAdvisor):
@@ -32,6 +31,8 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
   def __init__(self):
     super(HDPWIN21StackAdvisor, self).__init__()
 
+    self.initialize_logger("HDPWIN21StackAdvisor")
+
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()
     self.modifyHeapSizeProperties()
@@ -409,9 +410,9 @@ class HDPWIN21StackAdvisor(DefaultStackAdvisor):
             siteProperties = getSiteProperties(configurations, siteName)
             if siteProperties is not None:
               siteRecommendations = recommendedDefaults[siteName]["properties"]
-              print("SiteName: %s, method: %s\n" % (siteName, method.__name__))
-              print("Site properties: %s\n" % str(siteProperties))
-              print("Recommendations: %s\n********\n" % str(siteRecommendations))
+              self.logger.info("SiteName: %s, method: %s\n" % (siteName, method.__name__))
+              self.logger.info("Site properties: %s\n" % str(siteProperties))
+              self.logger.info("Recommendations: %s\n********\n" % str(siteRecommendations))
               resultItems = method(siteProperties, siteRecommendations, configurations, services, hosts)
               items.extend(resultItems)
     clusterWideItems = self.validateClusterConfigurations(configurations, services, hosts)
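
Every stack advisor in these hunks swaps the static Logger for an instance logger created via self.initialize_logger("<ClassName>"), and the HDPWIN 2.1 hunk above also routes bare print() calls through it. The real initialize_logger lives in the shared stack_advisor base class; below is only a minimal sketch of the pattern using the standard-library logging module, with a hypothetical mixin name and format.

    import logging

    class AdvisorLoggingMixin(object):
        # Hypothetical stand-in for the base-class method this commit relies on.
        def initialize_logger(self, name, level=logging.INFO):
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(
                "%(levelname)s %(name)s %(funcName)s: %(message)s"))
            self.logger = logging.getLogger(name)
            self.logger.addHandler(handler)
            self.logger.setLevel(level)

    adv = AdvisorLoggingMixin()
    adv.initialize_logger("HDPWIN21StackAdvisor")
    adv.logger.info("validations ran")  # INFO HDPWIN21StackAdvisor <module>: validations ran
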

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py
index a3d9598..7d11640 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/stack_advisor.py
@@ -25,7 +25,6 @@ import sys
 from urlparse import urlparse
 
 # Local Imports
-from resource_management.core.logger import Logger
 
 def getSiteProperties(configurations, siteName):
   siteConfig = configurations.get(siteName)
@@ -37,7 +36,7 @@ class HDPWIN22StackAdvisor(HDPWIN21StackAdvisor):
 
   def __init__(self):
     super(HDPWIN22StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("HDPWIN22StackAdvisor")
 
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/PERF/1.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/stack_advisor.py
index 43d417e..835311a 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/stack_advisor.py
@@ -20,7 +20,6 @@ limitations under the License.
 # Python Imports
 
 # Local Imports
-from resource_management.core.logger import Logger
 from stack_advisor import DefaultStackAdvisor
 
 
@@ -28,7 +27,7 @@ class PERF10StackAdvisor(DefaultStackAdvisor):
 
   def __init__(self):
     super(PERF10StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("PERF10StackAdvisor")
 
   def getServiceConfigurationRecommenderDict(self):
     return {}

