ambari-commits mailing list archives

From: dmitriu...@apache.org
Subject: [3/4] ambari git commit: AMBARI-20617. Display log level, method name from stack_advisor in ambari-server.log (dlysnichenko)
Date: Thu, 30 Mar 2017 12:49:10 GMT
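
This patch replaces the advisors' static Logger calls with a per-instance self.logger, initialized once in each advisor's constructor via initialize_logger(), so log lines in ambari-server.log carry the advisor's name along with the class and method. A minimal sketch of the pattern, assuming a base class that exposes such a helper (the base-class body below is illustrative, not the actual Ambari implementation):

    import inspect
    import logging

    class ServiceAdvisor(object):
      def initialize_logger(self, name, level=logging.INFO):
        # Hypothetical helper: give each advisor its own named logger so
        # emitted records identify the advisor that produced them.
        self.logger = logging.getLogger(name)
        self.logger.setLevel(level)

    class YARNServiceAdvisor(ServiceAdvisor):
      def __init__(self):
        self.initialize_logger("YARNServiceAdvisor")

      def getServiceConfigurationRecommendations(self):
        # The class/method logging idiom used throughout the patch:
        # inspect.stack()[0][3] is the name of the currently executing method.
        self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                         (self.__class__.__name__, inspect.stack()[0][3]))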
http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 103b934..fc32001 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -27,7 +27,6 @@ import math
 from math import floor, ceil
 
 # Local imports
-from resource_management.core.logger import Logger
 
 
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -48,6 +47,8 @@ class YARNServiceAdvisor(service_advisor.ServiceAdvisor):
     self.as_super = super(YARNServiceAdvisor, self)
     self.as_super.__init__(*args, **kwargs)
 
+    self.initialize_logger("YARNServiceAdvisor")
+
     self.CLUSTER_CREATE_OPERATION = "ClusterCreate"
 
     # Always call these methods
@@ -112,7 +113,7 @@ class YARNServiceAdvisor(service_advisor.ServiceAdvisor):
     Get a list of errors.
     Must be overridden in child class.
     """
-    Logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
+    self.logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     return self.as_super.getServiceComponentLayoutValidations(services, hosts)
@@ -122,7 +123,7 @@ class YARNServiceAdvisor(service_advisor.ServiceAdvisor):
     Entry point.
     Must be overridden in child class.
     """
-    Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     # Due to the existing stack inheritance, make it clear where each calculation came from.
@@ -141,7 +142,7 @@ class YARNServiceAdvisor(service_advisor.ServiceAdvisor):
     Validate configurations for the service. Return a list of errors.
     The code for this function should be the same for each Service Advisor.
     """
-    Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    self.logger.info("Class: %s, Method: %s. Validating Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     validator = YARNValidator()
@@ -219,7 +220,7 @@ class MAPREDUCE2ServiceAdvisor(service_advisor.ServiceAdvisor):
     Get a list of errors.
     Must be overridden in child class.
     """
-    Logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
+    self.logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     return self.as_super.getServiceComponentLayoutValidations(services, hosts)
@@ -229,7 +230,7 @@ class MAPREDUCE2ServiceAdvisor(service_advisor.ServiceAdvisor):
     Entry point.
     Must be overridden in child class.
     """
-    Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     # Due to the existing stack inheritance, make it clear where each calculation came from.
@@ -243,7 +244,7 @@ class MAPREDUCE2ServiceAdvisor(service_advisor.ServiceAdvisor):
     Validate configurations for the service. Return a list of errors.
     The code for this function should be the same for each Service Advisor.
     """
-    Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    self.logger.info("Class: %s, Method: %s. Validating Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     validator = YARNValidator()
@@ -269,7 +270,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     """
     Recommend configurations for this service based on HDP 2.0.6.
     """
-    Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
@@ -449,21 +450,21 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
            webapp_address = services["configurations"]["yarn-site"]["properties"]["yarn.timeline-service.webapp.address"]
            webservice_url = "http://"+webapp_address+"/ws/v1/applicationhistory"
          else:
-           Logger.error("Required config yarn.timeline-service.webapp.address in yarn-site does not exist. Unable to set yarn.log.server.web-service.url")
+           self.logger.error("Required config yarn.timeline-service.webapp.address in yarn-site does not exist. Unable to set yarn.log.server.web-service.url")
       else:
          if "yarn.timeline-service.webapp.https.address" in services["configurations"]["yarn-site"]["properties"]:
            webapp_address = services["configurations"]["yarn-site"]["properties"]["yarn.timeline-service.webapp.https.address"]
            webservice_url = "https://"+webapp_address+"/ws/v1/applicationhistory"
          else:
-           Logger.error("Required config yarn.timeline-service.webapp.https.address in yarn-site does not exist. Unable to set yarn.log.server.web-service.url")
+           self.logger.error("Required config yarn.timeline-service.webapp.https.address in yarn-site does not exist. Unable to set yarn.log.server.web-service.url")
       putYarnSiteProperty('yarn.log.server.web-service.url',webservice_url )
 
     if ranger_yarn_plugin_enabled and 'ranger-yarn-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
-      Logger.info("Setting Yarn Repo user for Ranger.")
+      self.logger.info("Setting Yarn Repo user for Ranger.")
       putRangerYarnPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
       putRangerYarnPluginProperty("REPOSITORY_CONFIG_USERNAME",yarn_user)
     else:
-      Logger.info("Not setting Yarn Repo user for Ranger.")
+      self.logger.info("Not setting Yarn Repo user for Ranger.")
 
 
   #region LLAP
@@ -485,14 +486,14 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
 
       Note: All memory calculations are in MB, unless specified otherwise.
     """
-    Logger.info("DBG: Entered updateLlapConfigs")
+    self.logger.info("DBG: Entered updateLlapConfigs")
 
     # Determine if we entered here during cluster creation.
     operation = getUserOperationContext(services, "operation")
     is_cluster_create_opr = False
     if operation == self.CLUSTER_CREATE_OPERATION:
       is_cluster_create_opr = True
-    Logger.info("Is cluster create operation ? = {0}".format(is_cluster_create_opr))
+    self.logger.info("Is cluster create operation ? = {0}".format(is_cluster_create_opr))
 
     putHiveInteractiveSiteProperty = self.putProperty(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE, services)
     putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE)
@@ -527,9 +528,9 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     if capacity_scheduler_properties:
       # Get all leaf queues.
       leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
-      Logger.info("YARN leaf Queues = {0}".format(leafQueueNames))
+      self.logger.info("YARN leaf Queues = {0}".format(leafQueueNames))
       if len(leafQueueNames) == 0:
-        Logger.error("Queue(s) couldn't be retrieved from capacity-scheduler.")
+        self.logger.error("Queue(s) couldn't be retrieved from capacity-scheduler.")
         return
 
       # Check if it's 1st invocation after enabling Hive Server Interactive (config: enable_hive_interactive).
@@ -548,20 +549,20 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
           putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', first_leaf_queue)
           putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', first_leaf_queue)
           llap_named_queue_selected_in_curr_invocation = False
-      Logger.info("DBG: llap_named_queue_selected_in_curr_invocation = {0}".format(llap_named_queue_selected_in_curr_invocation))
+      self.logger.info("DBG: llap_named_queue_selected_in_curr_invocation = {0}".format(llap_named_queue_selected_in_curr_invocation))
 
       if (len(leafQueueNames) == 2 and (llap_daemon_selected_queue_name and llap_daemon_selected_queue_name == llap_queue_name) or
             llap_named_queue_selected_in_curr_invocation) or \
         (len(leafQueueNames) == 1 and llap_daemon_selected_queue_name == 'default' and llap_named_queue_selected_in_curr_invocation):
-        Logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'False'.")
+        self.logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'False'.")
         putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "false")
         selected_queue_is_ambari_managed_llap = True
-        Logger.info("DBG: Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
+        self.logger.info("DBG: Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
                     "slider visibility to 'True'".format(llap_queue_name, list(leafQueueNames)))
       else:
-        Logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'True'.")
+        self.logger.info("DBG: Setting 'num_llap_nodes' config's  READ ONLY attribute as 'True'.")
         putHiveInteractiveEnvPropertyAttribute("num_llap_nodes", "read_only", "true")
-        Logger.info("Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
+        self.logger.info("Selected YARN queue for LLAP is : '{0}'. Current YARN queues : {1}. Setting 'Number of LLAP nodes' "
                     "visibility to 'False'.".format(llap_daemon_selected_queue_name, list(leafQueueNames)))
         selected_queue_is_ambari_managed_llap = False
 
@@ -571,17 +572,17 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
         if llap_daemon_selected_queue_name:
           llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
           if llap_selected_queue_state is None or llap_selected_queue_state == "STOPPED":
-            Logger.error("Selected LLAP app queue '{0}' current state is : '{1}'. Setting LLAP configs to default "
+            self.logger.error("Selected LLAP app queue '{0}' current state is : '{1}'. Setting LLAP configs to default "
                          "values.".format(llap_daemon_selected_queue_name, llap_selected_queue_state))
             self.recommendDefaultLlapConfiguration(configurations, services, hosts)
             return
         else:
-          Logger.error("Retrieved LLAP app queue name is : '{0}'. Setting LLAP configs to default values."
+          self.logger.error("Retrieved LLAP app queue name is : '{0}'. Setting LLAP configs to default values."
                        .format(llap_daemon_selected_queue_name))
           self.recommendDefaultLlapConfiguration(configurations, services, hosts)
           return
     else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
+      self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
                    " Not calculating LLAP configs.")
       return
 
@@ -603,16 +604,16 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
 
     if not changed_configs_in_hive_int_env and not llap_concurrency_in_changed_configs and \
         not llap_daemon_queue_in_changed_configs and services["changed-configurations"]:
-      Logger.info("DBG: LLAP parameters not modified. Not adjusting LLAP configs.")
-      Logger.info("DBG: Current 'changed-configuration' received is : {0}".format(services["changed-configurations"]))
+      self.logger.info("DBG: LLAP parameters not modified. Not adjusting LLAP configs.")
+      self.logger.info("DBG: Current 'changed-configuration' received is : {0}".format(services["changed-configurations"]))
       return
 
-    Logger.info("\nDBG: Performing LLAP config calculations ......")
+    self.logger.info("\nDBG: Performing LLAP config calculations ......")
     node_manager_host_list = self.getHostsForComponent(services, "YARN", "NODEMANAGER")
     node_manager_cnt = len(node_manager_host_list)
     yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
     total_cluster_capacity = node_manager_cnt * yarn_nm_mem_in_mb
-    Logger.info("DBG: Calculated total_cluster_capacity : {0}, using following : node_manager_cnt : {1}, "
+    self.logger.info("DBG: Calculated total_cluster_capacity : {0}, using following : node_manager_cnt : {1}, "
                 "yarn_nm_mem_in_mb : {2}".format(total_cluster_capacity, node_manager_cnt, yarn_nm_mem_in_mb))
     yarn_min_container_size = float(self.get_yarn_min_container_size(services, configurations))
     tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_capacity), is_cluster_create_opr,
@@ -624,7 +625,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     else:
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG Calculated normalized_tez_am_container_size : {0}, using following : tez_am_container_size : {1}, "
+    self.logger.info("DBG Calculated normalized_tez_am_container_size : {0}, using following : tez_am_container_size : {1}, "
                 "total_cluster_capacity : {2}".format(normalized_tez_am_container_size, tez_am_container_size,
                                                       total_cluster_capacity))
 
@@ -632,7 +633,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     yarn_nm_mem_in_mb_normalized = self._normalizeDown(yarn_nm_mem_in_mb, yarn_min_container_size)
     mem_per_thread_for_llap = float(self.calculate_mem_per_thread_for_llap(services, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host,
                                                                            is_cluster_create_opr, changed_configs_has_enable_hive_int))
-    Logger.info("DBG: Calculated mem_per_thread_for_llap : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, "
+    self.logger.info("DBG: Calculated mem_per_thread_for_llap : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, "
                 "cpu_per_nm_host : {2}".format(mem_per_thread_for_llap, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host))
 
 
@@ -643,48 +644,48 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     # Get calculated value for Slider AM container Size
     slider_am_container_size = self._normalizeUp(self.calculate_slider_am_size(yarn_min_container_size),
                                                  yarn_min_container_size)
-    Logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
+    self.logger.info("DBG: Calculated 'slider_am_container_size' : {0}, using following: yarn_min_container_size : "
                 "{1}".format(slider_am_container_size, yarn_min_container_size))
 
     min_memory_required = normalized_tez_am_container_size + slider_am_container_size + self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
-    Logger.info("DBG: Calculated 'min_memory_required': {0} using following : slider_am_container_size: {1}, "
+    self.logger.info("DBG: Calculated 'min_memory_required': {0} using following : slider_am_container_size: {1}, "
                 "normalized_tez_am_container_size : {2}, mem_per_thread_for_llap : {3}, yarn_min_container_size : "
                 "{4}".format(min_memory_required, slider_am_container_size, normalized_tez_am_container_size, mem_per_thread_for_llap, yarn_min_container_size))
 
     min_nodes_required = int(ceil( min_memory_required / yarn_nm_mem_in_mb_normalized))
-    Logger.info("DBG: Calculated 'min_node_required': {0}, using following : min_memory_required : {1}, yarn_nm_mem_in_mb_normalized "
+    self.logger.info("DBG: Calculated 'min_node_required': {0}, using following : min_memory_required : {1}, yarn_nm_mem_in_mb_normalized "
                 ": {2}".format(min_nodes_required, min_memory_required, yarn_nm_mem_in_mb_normalized))
     if min_nodes_required > node_manager_cnt:
-      Logger.warning("ERROR: Not enough memory/nodes to run LLAP");
+      self.logger.warning("ERROR: Not enough memory/nodes to run LLAP");
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
 
     mem_per_thread_for_llap = float(mem_per_thread_for_llap)
 
-    Logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap))
+    self.logger.info("DBG: selected_queue_is_ambari_managed_llap = {0}".format(selected_queue_is_ambari_managed_llap))
     if not selected_queue_is_ambari_managed_llap:
       llap_daemon_selected_queue_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties, llap_daemon_selected_queue_name, total_cluster_capacity)
 
       if llap_daemon_selected_queue_cap <= 0:
-        Logger.warning("'{0}' queue capacity percentage retrieved = {1}. Expected > 0.".format(
+        self.logger.warning("'{0}' queue capacity percentage retrieved = {1}. Expected > 0.".format(
           llap_daemon_selected_queue_name, llap_daemon_selected_queue_cap))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
       total_llap_mem_normalized = self._normalizeDown(llap_daemon_selected_queue_cap, yarn_min_container_size)
-      Logger.info("DBG: Calculated '{0}' queue available capacity : {1}, using following: llap_daemon_selected_queue_cap : {2}, "
+      self.logger.info("DBG: Calculated '{0}' queue available capacity : {1}, using following: llap_daemon_selected_queue_cap : {2}, "
                   "yarn_min_container_size : {3}".format(llap_daemon_selected_queue_name, total_llap_mem_normalized,
                                                          llap_daemon_selected_queue_cap, yarn_min_container_size))
       '''Rounding up numNodes so that we run more daemons, and utilize more CPUs. The rest of the calculations will take care of cutting this down if required'''
       num_llap_nodes_requested = ceil(total_llap_mem_normalized / yarn_nm_mem_in_mb_normalized)
-      Logger.info("DBG: Calculated 'num_llap_nodes_requested' : {0}, using following: total_llap_mem_normalized : {1}, "
+      self.logger.info("DBG: Calculated 'num_llap_nodes_requested' : {0}, using following: total_llap_mem_normalized : {1}, "
                   "yarn_nm_mem_in_mb_normalized : {2}".format(num_llap_nodes_requested, total_llap_mem_normalized, yarn_nm_mem_in_mb_normalized))
       # Populate the 'num_llap_nodes_requested' in config 'num_llap_nodes', a read only config for non-Ambari managed queue case.
       putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes_requested)
-      Logger.info("Setting config 'num_llap_nodes' as : {0}".format(num_llap_nodes_requested))
+      self.logger.info("Setting config 'num_llap_nodes' as : {0}".format(num_llap_nodes_requested))
       queue_am_fraction_perc = float(self.__getQueueAmFractionFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name))
       hive_tez_am_cap_available = queue_am_fraction_perc * total_llap_mem_normalized
-      Logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, "
+      self.logger.info("DBG: Calculated 'hive_tez_am_cap_available' : {0}, using following: queue_am_fraction_perc : {1}, "
                   "total_llap_mem_normalized : {2}".format(hive_tez_am_cap_available, queue_am_fraction_perc, total_llap_mem_normalized))
     else:  # Ambari managed 'llap' named queue at root level.
       # Set 'num_llap_nodes_requested' for 1st invocation, as it gets passed as 1 otherwise, read from config.
@@ -697,34 +698,34 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
       else:
         num_llap_nodes_requested = self.get_num_llap_nodes(services, configurations) #Input
       total_llap_mem = num_llap_nodes_requested * yarn_nm_mem_in_mb_normalized
-      Logger.info("DBG: Calculated 'total_llap_mem' : {0}, using following: num_llap_nodes_requested : {1}, "
+      self.logger.info("DBG: Calculated 'total_llap_mem' : {0}, using following: num_llap_nodes_requested : {1}, "
                   "yarn_nm_mem_in_mb_normalized : {2}".format(total_llap_mem, num_llap_nodes_requested, yarn_nm_mem_in_mb_normalized))
       total_llap_mem_normalized = float(self._normalizeDown(total_llap_mem, yarn_min_container_size))
-      Logger.info("DBG: Calculated 'total_llap_mem_normalized' : {0}, using following: total_llap_mem : {1}, "
+      self.logger.info("DBG: Calculated 'total_llap_mem_normalized' : {0}, using following: total_llap_mem : {1}, "
                   "yarn_min_container_size : {2}".format(total_llap_mem_normalized, total_llap_mem, yarn_min_container_size))
 
       # What percent is 'total_llap_mem' of 'total_cluster_capacity' ?
       llap_named_queue_cap_fraction = ceil(total_llap_mem_normalized / total_cluster_capacity * 100)
-      Logger.info("DBG: Calculated '{0}' queue capacity percent = {1}.".format(llap_queue_name, llap_named_queue_cap_fraction))
+      self.logger.info("DBG: Calculated '{0}' queue capacity percent = {1}.".format(llap_queue_name, llap_named_queue_cap_fraction))
 
       if llap_named_queue_cap_fraction > 100:
-        Logger.warning("Calculated '{0}' queue size = {1}. Cannot be > 100.".format(llap_queue_name, llap_named_queue_cap_fraction))
+        self.logger.warning("Calculated '{0}' queue size = {1}. Cannot be > 100.".format(llap_queue_name, llap_named_queue_cap_fraction))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
       # Adjust capacity scheduler for the 'llap' named queue.
       self.checkAndManageLlapQueue(services, configurations, hosts, llap_queue_name, llap_named_queue_cap_fraction)
       hive_tez_am_cap_available = total_llap_mem_normalized
-      Logger.info("DBG: hive_tez_am_cap_available : {0}".format(hive_tez_am_cap_available))
+      self.logger.info("DBG: hive_tez_am_cap_available : {0}".format(hive_tez_am_cap_available))
 
     # Common calculations now, irrespective of the queue selected.
 
     llap_mem_for_tezAm_and_daemons = total_llap_mem_normalized - slider_am_container_size
-    Logger.info("DBG: Calculated 'llap_mem_for_tezAm_and_daemons' : {0}, using following : total_llap_mem_normalized : {1}, "
+    self.logger.info("DBG: Calculated 'llap_mem_for_tezAm_and_daemons' : {0}, using following : total_llap_mem_normalized : {1}, "
                 "slider_am_container_size : {2}".format(llap_mem_for_tezAm_and_daemons, total_llap_mem_normalized, slider_am_container_size))
 
     if llap_mem_for_tezAm_and_daemons < 2 * yarn_min_container_size:
-      Logger.warning("Not enough capacity available on the cluster to run LLAP")
+      self.logger.warning("Not enough capacity available on the cluster to run LLAP")
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
 
@@ -734,11 +735,11 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     # Read 'hive.server2.tez.sessions.per.default.queue' prop if it's in changed-configs, else calculate it.
     if not llap_concurrency_in_changed_configs:
       if max_executors_per_node <= 0:
-        Logger.warning("Calculated 'max_executors_per_node' = {0}. Expected value >= 1.".format(max_executors_per_node))
+        self.logger.warning("Calculated 'max_executors_per_node' = {0}. Expected value >= 1.".format(max_executors_per_node))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
-      Logger.info("DBG: Calculated 'max_executors_per_node' : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
+      self.logger.info("DBG: Calculated 'max_executors_per_node' : {0}, using following: yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
                   "mem_per_thread_for_llap: {3}".format(max_executors_per_node, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap))
 
       # Default 1 AM for every 20 executor threads.
@@ -746,122 +747,122 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
       # making use of total memory. However, it's possible that total memory will not be used - and the numExecutors is
       # instead limited by #CPUs. Use maxPerNode to factor this in.
       llap_concurreny_limit = min(floor(max_executors_per_node * num_llap_nodes_requested / DEFAULT_EXECUTOR_TO_AM_RATIO), MAX_CONCURRENT_QUERIES)
-      Logger.info("DBG: Calculated 'llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested : {2}, DEFAULT_EXECUTOR_TO_AM_RATIO "
+      self.logger.info("DBG: Calculated 'llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested : {2}, DEFAULT_EXECUTOR_TO_AM_RATIO "
                   ": {3}, MAX_CONCURRENT_QUERIES : {4}".format(llap_concurreny_limit, max_executors_per_node, num_llap_nodes_requested, DEFAULT_EXECUTOR_TO_AM_RATIO, MAX_CONCURRENT_QUERIES))
       llap_concurrency = min(llap_concurreny_limit, floor(llap_mem_for_tezAm_and_daemons / (DEFAULT_EXECUTOR_TO_AM_RATIO * mem_per_thread_for_llap + normalized_tez_am_container_size)))
-      Logger.info("DBG: Calculated 'llap_concurrency' : {0}, using following : llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
+      self.logger.info("DBG: Calculated 'llap_concurrency' : {0}, using following : llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
                   "{2}, DEFAULT_EXECUTOR_TO_AM_RATIO : {3}, mem_per_thread_for_llap : {4}, normalized_tez_am_container_size : "
                   "{5}".format(llap_concurrency, llap_concurreny_limit, llap_mem_for_tezAm_and_daemons, DEFAULT_EXECUTOR_TO_AM_RATIO,
                                mem_per_thread_for_llap, normalized_tez_am_container_size))
       if llap_concurrency == 0:
         llap_concurrency = 1
-        Logger.info("DBG: Readjusted 'llap_concurrency' to : 1. Earlier calculated value : 0")
+        self.logger.info("DBG: Readjusted 'llap_concurrency' to : 1. Earlier calculated value : 0")
 
       if llap_concurrency * normalized_tez_am_container_size > hive_tez_am_cap_available:
         llap_concurrency = long(math.floor(hive_tez_am_cap_available / normalized_tez_am_container_size))
-        Logger.info("DBG: Readjusted 'llap_concurrency' to : {0}, as llap_concurrency({1}) * normalized_tez_am_container_size({2}) > hive_tez_am_cap_available({3}))"
+        self.logger.info("DBG: Readjusted 'llap_concurrency' to : {0}, as llap_concurrency({1}) * normalized_tez_am_container_size({2}) > hive_tez_am_cap_available({3}))"
                     .format(llap_concurrency, llap_concurrency, normalized_tez_am_container_size, hive_tez_am_cap_available))
 
         if llap_concurrency <= 0:
-          Logger.warning("DBG: Calculated 'LLAP Concurrent Queries' = {0}. Expected value >= 1.".format(llap_concurrency))
+          self.logger.warning("DBG: Calculated 'LLAP Concurrent Queries' = {0}. Expected value >= 1.".format(llap_concurrency))
           self.recommendDefaultLlapConfiguration(configurations, services, hosts)
           return
-        Logger.info("DBG: Adjusted 'llap_concurrency' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
+        self.logger.info("DBG: Adjusted 'llap_concurrency' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
                     "{2}".format(llap_concurrency, hive_tez_am_cap_available, normalized_tez_am_container_size))
     else:
       # Read current value
       if 'hive.server2.tez.sessions.per.default.queue' in hsi_site:
         llap_concurrency = long(hsi_site['hive.server2.tez.sessions.per.default.queue'])
         if llap_concurrency <= 0:
-          Logger.warning("'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1".format(llap_concurrency))
+          self.logger.warning("'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1".format(llap_concurrency))
           self.recommendDefaultLlapConfiguration(configurations, services, hosts)
           return
-        Logger.info("DBG: Read 'llap_concurrency' : {0}".format(llap_concurrency ))
+        self.logger.info("DBG: Read 'llap_concurrency' : {0}".format(llap_concurrency ))
       else:
         llap_concurrency = 1
-        Logger.warning("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config. Setting default value 1.")
+        self.logger.warning("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config. Setting default value 1.")
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
 
     # Calculate 'Max LLAP Concurrency', irrespective of whether 'llap_concurrency' was read or calculated.
     max_llap_concurreny_limit = min(floor(max_executors_per_node * num_llap_nodes_requested / MIN_EXECUTOR_TO_AM_RATIO), MAX_CONCURRENT_QUERIES)
-    Logger.info("DBG: Calculated 'max_llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested "
+    self.logger.info("DBG: Calculated 'max_llap_concurreny_limit' : {0}, using following : max_executors_per_node : {1}, num_llap_nodes_requested "
                 ": {2}, MIN_EXECUTOR_TO_AM_RATIO : {3}, MAX_CONCURRENT_QUERIES : {4}".format(max_llap_concurreny_limit, max_executors_per_node,
                                                                                              num_llap_nodes_requested, MIN_EXECUTOR_TO_AM_RATIO,
                                                                                              MAX_CONCURRENT_QUERIES))
     max_llap_concurreny = long(min(max_llap_concurreny_limit, floor(llap_mem_for_tezAm_and_daemons / (MIN_EXECUTOR_TO_AM_RATIO *
                                                                                                       mem_per_thread_for_llap + normalized_tez_am_container_size))))
-    Logger.info("DBG: Calculated 'max_llap_concurreny' : {0}, using following : max_llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
+    self.logger.info("DBG: Calculated 'max_llap_concurreny' : {0}, using following : max_llap_concurreny_limit : {1}, llap_mem_for_tezAm_and_daemons : "
                 "{2}, MIN_EXECUTOR_TO_AM_RATIO : {3}, mem_per_thread_for_llap : {4}, normalized_tez_am_container_size : "
                 "{5}".format(max_llap_concurreny, max_llap_concurreny_limit, llap_mem_for_tezAm_and_daemons, MIN_EXECUTOR_TO_AM_RATIO,
                              mem_per_thread_for_llap, normalized_tez_am_container_size))
     if int(max_llap_concurreny) < MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS:
-      Logger.info("DBG: Adjusting 'max_llap_concurreny' from {0} to {1}".format(max_llap_concurreny, MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS))
+      self.logger.info("DBG: Adjusting 'max_llap_concurreny' from {0} to {1}".format(max_llap_concurreny, MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS))
       max_llap_concurreny = MAX_CONCURRENT_QUERIES_SMALL_CLUSTERS
 
     if (max_llap_concurreny * normalized_tez_am_container_size) > hive_tez_am_cap_available:
       max_llap_concurreny = floor(hive_tez_am_cap_available / normalized_tez_am_container_size)
       if max_llap_concurreny <= 0:
-        Logger.warning("Calculated 'Max. LLAP Concurrent Queries' = {0}. Expected value > 1".format(max_llap_concurreny))
+        self.logger.warning("Calculated 'Max. LLAP Concurrent Queries' = {0}. Expected value > 1".format(max_llap_concurreny))
         self.recommendDefaultLlapConfiguration(configurations, services, hosts)
         return
-      Logger.info("DBG: Adjusted 'max_llap_concurreny' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
+      self.logger.info("DBG: Adjusted 'max_llap_concurreny' : {0}, using following: hive_tez_am_cap_available : {1}, normalized_tez_am_container_size: "
                   "{2}".format(max_llap_concurreny, hive_tez_am_cap_available, normalized_tez_am_container_size))
 
     # Calculate value for 'num_llap_nodes', an across cluster config.
     tez_am_memory_required = llap_concurrency * normalized_tez_am_container_size
-    Logger.info("DBG: Calculated 'tez_am_memory_required' : {0}, using following : llap_concurrency : {1}, normalized_tez_am_container_size : "
+    self.logger.info("DBG: Calculated 'tez_am_memory_required' : {0}, using following : llap_concurrency : {1}, normalized_tez_am_container_size : "
                 "{2}".format(tez_am_memory_required, llap_concurrency, normalized_tez_am_container_size))
     llap_mem_daemon_size = llap_mem_for_tezAm_and_daemons - tez_am_memory_required
 
     if llap_mem_daemon_size < yarn_min_container_size:
-      Logger.warning("Calculated 'LLAP Daemon Size = {0}'. Expected >= 'YARN Minimum Container Size' ({1})'".format(
+      self.logger.warning("Calculated 'LLAP Daemon Size = {0}'. Expected >= 'YARN Minimum Container Size' ({1})'".format(
         llap_mem_daemon_size, yarn_min_container_size))
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
 
     if llap_mem_daemon_size < mem_per_thread_for_llap or llap_mem_daemon_size < yarn_min_container_size:
-      Logger.warning("Not enough memory available for executors.")
+      self.logger.warning("Not enough memory available for executors.")
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG: Calculated 'llap_mem_daemon_size' : {0}, using following : llap_mem_for_tezAm_and_daemons : {1}, tez_am_memory_required : "
+    self.logger.info("DBG: Calculated 'llap_mem_daemon_size' : {0}, using following : llap_mem_for_tezAm_and_daemons : {1}, tez_am_memory_required : "
                 "{2}".format(llap_mem_daemon_size, llap_mem_for_tezAm_and_daemons, tez_am_memory_required))
 
     llap_daemon_mem_per_node = self._normalizeDown(llap_mem_daemon_size / num_llap_nodes_requested, yarn_min_container_size)
-    Logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
+    self.logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
                 "yarn_min_container_size: {3}".format(llap_daemon_mem_per_node, llap_mem_daemon_size, num_llap_nodes_requested, yarn_min_container_size))
     if llap_daemon_mem_per_node == 0:
       # Small cluster. No capacity left on a node after running AMs.
       llap_daemon_mem_per_node = self._normalizeUp(mem_per_thread_for_llap, yarn_min_container_size)
       num_llap_nodes = floor(llap_mem_daemon_size / llap_daemon_mem_per_node)
-      Logger.info("DBG: 'llap_daemon_mem_per_node' : 0, adjusted 'llap_daemon_mem_per_node' : {0}, 'num_llap_nodes' : {1}, using following: llap_mem_daemon_size : {2}, "
+      self.logger.info("DBG: 'llap_daemon_mem_per_node' : 0, adjusted 'llap_daemon_mem_per_node' : {0}, 'num_llap_nodes' : {1}, using following: llap_mem_daemon_size : {2}, "
                   "mem_per_thread_for_llap : {3}".format(llap_daemon_mem_per_node, num_llap_nodes, llap_mem_daemon_size, mem_per_thread_for_llap))
     elif llap_daemon_mem_per_node < mem_per_thread_for_llap:
       # Previously computed value of memory per thread may be too high. Cut the number of nodes. (Alternately reduce memory per node)
       llap_daemon_mem_per_node = mem_per_thread_for_llap
       num_llap_nodes = floor(llap_mem_daemon_size / mem_per_thread_for_llap)
-      Logger.info("DBG: 'llap_daemon_mem_per_node'({0}) < mem_per_thread_for_llap({1}), adjusted 'llap_daemon_mem_per_node' "
+      self.logger.info("DBG: 'llap_daemon_mem_per_node'({0}) < mem_per_thread_for_llap({1}), adjusted 'llap_daemon_mem_per_node' "
                   ": {2}".format(llap_daemon_mem_per_node, mem_per_thread_for_llap, llap_daemon_mem_per_node))
     else:
       # All good. We have a proper value for memoryPerNode.
       num_llap_nodes = num_llap_nodes_requested
-      Logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
+      self.logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
 
     num_executors_per_node_max = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap)
     if num_executors_per_node_max < 1:
-      Logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
+      self.logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG: Calculated 'num_executors_per_node_max' : {0}, using following : yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
+    self.logger.info("DBG: Calculated 'num_executors_per_node_max' : {0}, using following : yarn_nm_mem_in_mb_normalized : {1}, cpu_per_nm_host : {2}, "
                 "mem_per_thread_for_llap: {3}".format(num_executors_per_node_max, yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap))
 
     # NumExecutorsPerNode is not necessarily max - since some capacity would have been reserved for AMs, if this value were based on mem.
     num_executors_per_node = min(floor(llap_daemon_mem_per_node / mem_per_thread_for_llap), num_executors_per_node_max)
     if num_executors_per_node <= 0:
-      Logger.warning("Calculated 'Number of Executors Per Node' = {0}. Expected value >= 1".format(num_executors_per_node))
+      self.logger.warning("Calculated 'Number of Executors Per Node' = {0}. Expected value >= 1".format(num_executors_per_node))
       self.recommendDefaultLlapConfiguration(configurations, services, hosts)
       return
-    Logger.info("DBG: Calculated 'num_executors_per_node' : {0}, using following : llap_daemon_mem_per_node : {1}, num_executors_per_node_max : {2}, "
+    self.logger.info("DBG: Calculated 'num_executors_per_node' : {0}, using following : llap_daemon_mem_per_node : {1}, num_executors_per_node_max : {2}, "
                 "mem_per_thread_for_llap: {3}".format(num_executors_per_node, llap_daemon_mem_per_node, num_executors_per_node_max, mem_per_thread_for_llap))
 
     # Now figure out how much of the memory will be used by the executors, and how much will be used by the cache.
@@ -875,7 +876,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
 
     # Calculate value for prop 'llap_heap_size'
     llap_xmx = max(total_mem_for_executors_per_node * 0.8, total_mem_for_executors_per_node - self.get_llap_headroom_space(services, configurations))
-    Logger.info("DBG: Calculated llap_app_heap_size : {0}, using following : total_mem_for_executors : {1}".format(llap_xmx, total_mem_for_executors_per_node))
+    self.logger.info("DBG: Calculated llap_app_heap_size : {0}, using following : total_mem_for_executors : {1}".format(llap_xmx, total_mem_for_executors_per_node))
 
     # Calculate 'hive_heapsize' for Hive2/HiveServer2 (HSI)
     hive_server_interactive_heapsize = None
@@ -886,16 +887,16 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     if hive_server_interactive_hosts is not None and len(hive_server_interactive_hosts) > 0:
       host_mem = long(hive_server_interactive_hosts[0]["Hosts"]["total_mem"])
       hive_server_interactive_heapsize = min(max(2048.0, 400.0*llap_concurrency), 3.0/8 * host_mem)
-      Logger.info("DBG: Calculated 'hive_server_interactive_heapsize' : {0}, using following : llap_concurrency : {1}, host_mem : "
+      self.logger.info("DBG: Calculated 'hive_server_interactive_heapsize' : {0}, using following : llap_concurrency : {1}, host_mem : "
                   "{2}".format(hive_server_interactive_heapsize, llap_concurrency, host_mem))
 
     # Done with calculations, updating calculated configs.
-    Logger.info("DBG: Applying the calculated values....")
+    self.logger.info("DBG: Applying the calculated values....")
 
     if is_cluster_create_opr or changed_configs_has_enable_hive_int:
       normalized_tez_am_container_size = long(normalized_tez_am_container_size)
       putTezInteractiveSiteProperty('tez.am.resource.memory.mb', normalized_tez_am_container_size)
-      Logger.info("DBG: Setting 'tez.am.resource.memory.mb' config value as : {0}".format(normalized_tez_am_container_size))
+      self.logger.info("DBG: Setting 'tez.am.resource.memory.mb' config value as : {0}".format(normalized_tez_am_container_size))
 
     if not llap_concurrency_in_changed_configs:
       min_llap_concurrency = 1
@@ -910,16 +911,16 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", node_manager_cnt)
     #TODO A single value is not being set for numNodes in case of a custom queue. Also the attribute is set to non-visible, so the UI likely ends up using an old cached value
     if (num_llap_nodes != num_llap_nodes_requested):
-      Logger.info("DBG: User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes))
+      self.logger.info("DBG: User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes))
     else:
-      Logger.info("DBG: Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested))
+      self.logger.info("DBG: Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested))
 
     # Safeguard for not adding "num_llap_nodes_for_llap_daemons" if it doesn't exist in hive-interactive-site.
     # This can happen if we upgrade from Ambari 2.4 (with HDP 2.5) to Ambari 2.5, as this config is from 2.6 stack onwards only.
     if "hive-interactive-env" in services["configurations"] and \
         "num_llap_nodes_for_llap_daemons" in services["configurations"]["hive-interactive-env"]["properties"]:
       putHiveInteractiveEnvProperty('num_llap_nodes_for_llap_daemons', num_llap_nodes)
-      Logger.info("DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {0}".format(num_llap_nodes))
+      self.logger.info("DBG: Setting config 'num_llap_nodes_for_llap_daemons' as : {0}".format(num_llap_nodes))
 
     llap_container_size = long(llap_daemon_mem_per_node)
     putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size)
@@ -929,7 +930,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     if is_cluster_create_opr or changed_configs_has_enable_hive_int:
       mem_per_thread_for_llap = long(mem_per_thread_for_llap)
       putHiveInteractiveSiteProperty('hive.tez.container.size', mem_per_thread_for_llap)
-      Logger.info("DBG: Setting 'hive.tez.container.size' config value as : {0}".format(mem_per_thread_for_llap))
+      self.logger.info("DBG: Setting 'hive.tez.container.size' config value as : {0}".format(mem_per_thread_for_llap))
 
     putTezInteractiveSiteProperty('tez.runtime.io.sort.mb', tez_runtime_io_sort_mb)
     if "tez-site" in services["configurations"] and "tez.runtime.sorter.class" in services["configurations"]["tez-site"]["properties"]:
@@ -940,7 +941,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     putHiveInteractiveSiteProperty('hive.auto.convert.join.noconditionaltask.size', hive_auto_convert_join_noconditionaltask_size)
 
     num_executors_per_node = long(num_executors_per_node)
-    Logger.info("DBG: Putting num_executors_per_node as {0}".format(num_executors_per_node))
+    self.logger.info("DBG: Putting num_executors_per_node as {0}".format(num_executors_per_node))
     putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', num_executors_per_node)
     putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1)
     putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "maximum", long(num_executors_per_node_max))
@@ -960,10 +961,10 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
 
     putHiveInteractiveEnvProperty('llap_heap_size', long(llap_xmx))
     putHiveInteractiveEnvProperty('slider_am_container_mb', long(slider_am_container_size))
-    Logger.info("DBG: Done putting all configs")
+    self.logger.info("DBG: Done putting all configs")
 
   def recommendDefaultLlapConfiguration(self, configurations, services, hosts):
-    Logger.info("DBG: Something likely went wrong. recommendDefaultLlapConfiguration")
+    self.logger.info("DBG: Something likely went wrong. recommendDefaultLlapConfiguration")
     putHiveInteractiveSiteProperty = self.putProperty(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE, services)
     putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE)
 
@@ -1015,7 +1016,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     elif hsi_env and 'num_llap_nodes' in hsi_env:
       num_llap_nodes = hsi_env['num_llap_nodes']
     else:
-      Logger.error("Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {0}".format(num_llap_nodes))
+      self.logger.error("Couldn't retrieve Hive Server 'num_llap_nodes' config. Setting value to {0}".format(num_llap_nodes))
 
     return float(num_llap_nodes)
 
@@ -1042,10 +1043,10 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
       else:
         calculated_hive_tez_container_size = 4096
 
-      Logger.info("DBG: Calculated and returning 'hive_tez_container_size' : {0}".format(calculated_hive_tez_container_size))
+      self.logger.info("DBG: Calculated and returning 'hive_tez_container_size' : {0}".format(calculated_hive_tez_container_size))
       return calculated_hive_tez_container_size
     else:
-      Logger.info("DBG: Returning 'hive_tez_container_size' : {0}".format(hive_tez_container_size))
+      self.logger.info("DBG: Returning 'hive_tez_container_size' : {0}".format(hive_tez_container_size))
       return hive_tez_container_size
 
   def get_hive_tez_container_size(self, services):
@@ -1079,16 +1080,16 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     # Check if 'llap_headroom_space' is modified in current SA invocation.
     if 'hive-interactive-env' in configurations and 'llap_headroom_space' in configurations['hive-interactive-env']['properties']:
       llap_headroom_space = float(configurations['hive-interactive-env']['properties']['llap_headroom_space'])
-      Logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space))
+      self.logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space))
 
     if llap_headroom_space is None:
       # Check if 'llap_headroom_space' is input in services array.
       if 'llap_headroom_space' in services['configurations']['hive-interactive-env']['properties']:
         llap_headroom_space = float(services['configurations']['hive-interactive-env']['properties']['llap_headroom_space'])
-        Logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space))
+        self.logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space))
     if not llap_headroom_space or llap_headroom_space < 1:
       llap_headroom_space = 6144 # 6GB
-      Logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes")
+      self.logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes")
 
     return llap_headroom_space
 
@@ -1098,7 +1099,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
                (2). Updates 'llap' queue capacity and state, if current selected queue is 'llap', and only 2 queues exist
                     at root level : 'default' and 'llap'.
     """
-    Logger.info("Determining creation/adjustment of 'capacity-scheduler' for 'llap' queue.")
+    self.logger.info("Determining creation/adjustment of 'capacity-scheduler' for 'llap' queue.")
     putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
     putHiveInteractiveSiteProperty = self.putProperty(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE, services)
     putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
@@ -1178,7 +1179,7 @@ yarn.scheduler.capacity.root.{0}.acl_administer_queue={2}
 yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_queue_name, llap_queue_cap_perc, hive_user)
 
           putCapSchedProperty("capacity-scheduler", updated_cap_sched_configs_str)
-          Logger.info("Updated 'capacity-scheduler' configs as one concatenated string.")
+          self.logger.info("Updated 'capacity-scheduler' configs as one concatenated string.")
         else:
           # If capacity-scheduler configs are received as a  dictionary (generally 1st time), we deposit the changed
           # values back as dictionary itself.
@@ -1205,16 +1206,16 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
           putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".acl_administer_queue", hive_user)
           putCapSchedProperty("yarn.scheduler.capacity.root." + llap_queue_name + ".maximum-am-resource-percent", "1")
 
-          Logger.info("Updated 'capacity-scheduler' configs as a dictionary.")
+          self.logger.info("Updated 'capacity-scheduler' configs as a dictionary.")
           updated_cap_sched_configs_as_dict = True
 
         if updated_cap_sched_configs_str or updated_cap_sched_configs_as_dict:
           if len(leafQueueNames) == 1: # 'llap' queue didn't exist before
-            Logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted 'default' queue capacity to : {2}%" \
+            self.logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted 'default' queue capacity to : {2}%" \
                         .format(llap_queue_name, llap_queue_cap_perc, adjusted_default_queue_cap))
           else: # Queue existed, only adjustments done.
-            Logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_queue_cap_perc))
-            Logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap))
+            self.logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_queue_cap_perc))
+            self.logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap))
 
           # Update Hive 'hive.llap.daemon.queue.name' prop to use 'llap' queue.
           putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', llap_queue_name)
@@ -1222,9 +1223,9 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
           # Update 'hive.llap.daemon.queue.name' prop combo entries and llap capacity slider visibility.
           self.setLlapDaemonQueuePropAttributes(services, configurations)
       else:
-        Logger.debug("Not creating/adjusting {0} queue. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
+        self.logger.debug("Not creating/adjusting {0} queue. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
     else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
+      self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
 
   def checkAndStopLlapQueue(self, services, configurations, llap_queue_name):
     """
@@ -1250,7 +1251,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
         if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state' in capacity_scheduler_properties.keys():
           currLlapQueueState = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.state')
         else:
-          Logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name))
+          self.logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name))
           return
         if currLlapQueueState == 'RUNNING':
           DEFAULT_MAX_CAPACITY = '100'
@@ -1280,28 +1281,28 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
               elif prop.startswith('yarn.'):
                 updated_llap_queue_configs = updated_llap_queue_configs + prop + "=" + val + "\n"
         else:
-          Logger.debug("{0} queue state is : {1}. Skipping adjusting queues.".format(llap_queue_name, currLlapQueueState))
+          self.logger.debug("{0} queue state is : {1}. Skipping adjusting queues.".format(llap_queue_name, currLlapQueueState))
           return
 
         if updated_default_queue_configs and updated_llap_queue_configs:
           putCapSchedProperty("capacity-scheduler", updated_default_queue_configs+updated_llap_queue_configs)
-          Logger.info("Changed YARN '{0}' queue state to 'STOPPED', and capacity to 0%. Adjusted 'default' queue capacity to : {1}%" \
+          self.logger.info("Changed YARN '{0}' queue state to 'STOPPED', and capacity to 0%. Adjusted 'default' queue capacity to : {1}%" \
                       .format(llap_queue_name, DEFAULT_MAX_CAPACITY))
 
           # Update Hive 'hive.llap.daemon.queue.name' prop to use 'default' queue.
           putHiveInteractiveSiteProperty('hive.llap.daemon.queue.name', YARNRecommender.YARN_ROOT_DEFAULT_QUEUE_NAME)
           putHiveInteractiveSiteProperty('hive.server2.tez.default.queues', YARNRecommender.YARN_ROOT_DEFAULT_QUEUE_NAME)
       else:
-        Logger.debug("Not removing '{0}' queue as number of Queues not equal to 2. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
+        self.logger.debug("Not removing '{0}' queue as number of Queues not equal to 2. Current YARN queues : {1}".format(llap_queue_name, list(leafQueueNames)))
     else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
+      self.logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive.")
 
   def setLlapDaemonQueuePropAttributes(self, services, configurations):
     """
     Checks and sets the 'Hive Server Interactive' 'hive.llap.daemon.queue.name' config Property Attributes.  Takes into
     account that 'capacity-scheduler' may have changed (got updated) in current Stack Advisor invocation.
     """
-    Logger.info("Determining 'hive.llap.daemon.queue.name' config Property Attributes.")
+    self.logger.info("Determining 'hive.llap.daemon.queue.name' config Property Attributes.")
     #TODO Determine if this is doing the right thing if some queue is setup with capacity=0, or is STOPPED. Maybe don't list it.
     putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, YARNRecommender.HIVE_INTERACTIVE_SITE)
 
@@ -1320,31 +1321,31 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
             for property in cap_sched_props_as_str:
               key, sep, value = property.partition("=")
               capacity_scheduler_properties[key] = value
-            Logger.info("'capacity-scheduler' configs is set as a single '\\n' separated string in current invocation. "
+            self.logger.info("'capacity-scheduler' configs is set as a single '\\n' separated string in current invocation. "
                         "count(configurations['capacity-scheduler']['properties']['capacity-scheduler']) = "
                         "{0}".format(len(capacity_scheduler_properties)))
           else:
-            Logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str))
+            self.logger.info("Read configurations['capacity-scheduler']['properties']['capacity-scheduler'] is : {0}".format(cap_sched_props_as_str))
         else:
-          Logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str))
+          self.logger.info("configurations['capacity-scheduler']['properties']['capacity-scheduler'] : {0}.".format(cap_sched_props_as_str))
 
       # if 'capacity_scheduler_properties' is empty, we may have the 'capacity-scheduler' configs as a dictionary
       # in 'configurations', if 'capacity-scheduler' changed in the current invocation.
       if not capacity_scheduler_properties:
         if isinstance(cap_sched_props_as_dict, dict) and len(cap_sched_props_as_dict) > 1:
           capacity_scheduler_properties = cap_sched_props_as_dict
-          Logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. Retrieved the configs as dictionary from configurations.")
+          self.logger.info("'capacity-scheduler' changed in current Stack Advisor invocation. Retrieved the configs as dictionary from configurations.")
         else:
-          Logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict))
+          self.logger.info("Read configurations['capacity-scheduler']['properties'] is : {0}".format(cap_sched_props_as_dict))
     else:
-      Logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.")
+      self.logger.info("'capacity-scheduler' not modified in the current Stack Advisor invocation.")
 
 
     # if 'capacity_scheduler_properties' is still empty, it implies 'capacity-scheduler' wasn't changed in the current
     # SA invocation. Thus, read it from input : 'services'.
     if not capacity_scheduler_properties:
       capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
-      Logger.info("'capacity-scheduler' not changed in current Stack Advisor invocation. Retrieved the configs from services.")
+      self.logger.info("'capacity-scheduler' not changed in current Stack Advisor invocation. Retrieved the configs from services.")
 
     # Get set of current YARN leaf queues.
     leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
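
The branching above exists because 'capacity-scheduler' can arrive in two shapes inside 'configurations': a single '\n'-separated string under the inner 'capacity-scheduler' key, or an already-split dictionary of properties. A hedged sketch of the normalization (the committed method additionally logs which shape it saw and falls back to 'services'):

    def to_cap_sched_dict(raw):
        # raw: one "key=value\nkey=value" string, or a dict of properties.
        # (Python 2, as used by the advisors, would test basestring instead of str.)
        if isinstance(raw, str):
            props = {}
            for line in raw.split('\n'):
                key, sep, value = line.partition('=')
                if sep:
                    props[key] = value
            return props
        return raw if isinstance(raw, dict) else {}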
@@ -1352,9 +1353,9 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
       leafQueues = [{"label": str(queueName), "value": queueName} for queueName in leafQueueNames]
       leafQueues = sorted(leafQueues, key=lambda q: q['value'])
       putHiveInteractiveSitePropertyAttribute("hive.llap.daemon.queue.name", "entries", leafQueues)
-      Logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues))
+      self.logger.info("'hive.llap.daemon.queue.name' config Property Attributes set to : {0}".format(leafQueues))
     else:
-      Logger.error("Problem retrieving YARN queues. Skipping updating HIVE Server Interactve "
+      self.logger.error("Problem retrieving YARN queues. Skipping updating HIVE Server Interactve "
                    "'hive.server2.tez.default.queues' property attributes.")
 
   #TODO  Convert this to a helper. It can apply to any property. Check config, or check if in the list of changed configurations and read the latest value
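
The 'entries' attribute written above is what renders the queue dropdown for 'hive.llap.daemon.queue.name': each YARN leaf queue becomes a label/value pair, sorted by value. For example:

    leafQueueNames = set(['llap', 'default'])
    leafQueues = sorted([{"label": str(q), "value": q} for q in leafQueueNames],
                        key=lambda q: q['value'])
    # -> [{'label': 'default', 'value': 'default'}, {'label': 'llap', 'value': 'llap'}]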
@@ -1382,15 +1383,15 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     # Check if services["changed-configurations"] is empty and 'yarn.scheduler.minimum-allocation-mb' is modified in current ST invocation.
     if not services["changed-configurations"] and yarn_site and yarn_min_allocation_property in yarn_site:
       yarn_min_container_size = yarn_site[yarn_min_allocation_property]
-      Logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size))
+      self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from output as : {0}".format(yarn_min_container_size))
 
     # Check if 'yarn.scheduler.minimum-allocation-mb' is input in services array.
     elif yarn_site_properties and yarn_min_allocation_property in yarn_site_properties:
       yarn_min_container_size = yarn_site_properties[yarn_min_allocation_property]
-      Logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size))
+      self.logger.info("DBG: 'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size))
 
     if not yarn_min_container_size:
-      Logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property))
+      self.logger.error("{0} was not found in the configuration".format(yarn_min_allocation_property))
 
     return yarn_min_container_size
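
The lookup order above is the general pattern the #TODO above asks to extract: a value recommended earlier in the same invocation (the 'configurations' output) wins over the incoming 'services' input, and an error is logged only when neither has it. A generic sketch of that precedence, with illustrative names:

    def read_latest_property(configurations, services, config_type, prop_name):
        # Prefer the value produced in this invocation's output...
        out = configurations.get(config_type, {}).get('properties', {})
        if prop_name in out:
            return out[prop_name]
        # ...then fall back to the value that came in with the request.
        svc = services.get('configurations', {}).get(config_type, {}).get('properties', {})
        return svc.get(prop_name)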
 
@@ -1430,7 +1431,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
       yarn_nm_mem_in_mb = float(yarn_site['yarn.nodemanager.resource.memory-mb'])
 
     if yarn_nm_mem_in_mb <= 0.0:
-      Logger.warning("'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(yarn_nm_mem_in_mb))
+      self.logger.warning("'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(yarn_nm_mem_in_mb))
 
     return yarn_nm_mem_in_mb
 
@@ -1450,10 +1451,10 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
       elif total_cluster_capacity > 98304:
         calculated_tez_am_resource_memory_mb = 4096
 
-      Logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb))
+      self.logger.info("DBG: Calculated and returning 'tez_am_resource_memory_mb' as : {0}".format(calculated_tez_am_resource_memory_mb))
       return float(calculated_tez_am_resource_memory_mb)
     else:
-      Logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb))
+      self.logger.info("DBG: Returning 'tez_am_resource_memory_mb' as : {0}".format(tez_am_resource_memory_mb))
       return float(tez_am_resource_memory_mb)
 
   def get_tez_am_resource_memory_mb(self, services):
@@ -1532,14 +1533,14 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     for key in cap_sched_keys:
       if key.endswith("." + llap_daemon_selected_queue_name+".maximum-am-resource-percent"):
         llap_selected_queue_am_percent_key = key
-        Logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key))
+        self.logger.info("AM percent key got for '{0}' queue is : '{1}'".format(llap_daemon_selected_queue_name, llap_selected_queue_am_percent_key))
         break
     if llap_selected_queue_am_percent_key is None:
-      Logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name))
+      self.logger.info("Returning default AM percent value : '0.1' for queue : {0}".format(llap_daemon_selected_queue_name))
       return 0.1 # Default value to use if we couldn't retrieve queue's corresponding AM Percent key.
     else:
       llap_selected_queue_am_percent = capacity_scheduler_properties.get(llap_selected_queue_am_percent_key)
-      Logger.info("Returning read value for key '{0}' as : '{1}' for queue : '{2}'".format(llap_selected_queue_am_percent_key,
+      self.logger.info("Returning read value for key '{0}' as : '{1}' for queue : '{2}'".format(llap_selected_queue_am_percent_key,
                                                                                            llap_selected_queue_am_percent,
                                                                                            llap_daemon_selected_queue_name))
       return llap_selected_queue_am_percent
@@ -1548,7 +1549,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     """
     Calculates the total available capacity for the passed-in YARN queue of any level based on the percentages.
     """
-    Logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name))
+    self.logger.info("Entered __getSelectedQueueTotalCap fn() with llap_daemon_selected_queue_name= '{0}'.".format(llap_daemon_selected_queue_name))
     available_capacity = total_cluster_capacity
     queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
     if queue_cap_key:
@@ -1558,13 +1559,13 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
         queue_path = queue_cap_key[24:]  # Strip from beginning 'yarn.scheduler.capacity.'
         queue_path = queue_path[0:-9]  # Strip from end '.capacity'
         queues_list = queue_path.split('.')
-        Logger.info("Queue list : {0}".format(queues_list))
+        self.logger.info("Queue list : {0}".format(queues_list))
         if queues_list:
           for queue in queues_list:
             queue_cap_key = self.__getQueueCapacityKeyFromCapacityScheduler(capacity_scheduler_properties, queue)
             queue_cap_perc = float(capacity_scheduler_properties.get(queue_cap_key))
             available_capacity = queue_cap_perc / 100 * available_capacity
-            Logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity))
+            self.logger.info("Total capacity available for queue {0} is : {1}".format(queue, available_capacity))
 
     # Returns the capacity calculated for the queue passed in via 'llap_daemon_selected_queue_name'.
     return available_capacity
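
The loop above turns a queue's percentage chain into an absolute capacity by multiplying the 'capacity' value at every level of its path into the cluster total. A standalone restatement with a worked example:

    def walk_queue_capacity(level_percentages, total_cluster_capacity):
        available = float(total_cluster_capacity)
        for pct in level_percentages:    # one 'capacity' percent per queue level
            available = pct / 100.0 * available
        return available

    # root.llap.capacity=50 and root.llap.a.capacity=40 on a 200000 MB cluster:
    walk_queue_capacity([50.0, 40.0], 200000)    # -> 40000.0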
@@ -1580,7 +1581,7 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     for key in cap_sched_keys:
       # Expected capacity prop key is of form : 'yarn.scheduler.capacity.<one or more queues in path separated by '.'>.[llap_daemon_selected_queue_name].capacity'
       if key.endswith(llap_daemon_selected_queue_name+".capacity") and key.startswith("yarn.scheduler.capacity.root"):
-        Logger.info("DBG: Selected queue name as: " + key)
+        self.logger.info("DBG: Selected queue name as: " + key)
         llap_selected_queue_cap_key = key
         break
     return llap_selected_queue_cap_key
@@ -1753,7 +1754,7 @@ class YARNValidator(service_advisor.ServiceAdvisor):
     else:
       webapp_address = services["configurations"]["yarn-site"]["properties"]["yarn.timeline-service.webapp.https.address"]
       propertyValue = "https://"+webapp_address+"/ws/v1/applicationhistory"
-      Logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
+      self.logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
     if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue:
       validationItems = [
                       {"config-name": "yarn.log.server.web-service.url",

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py
index 4174b9c..c86eb33 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py
@@ -24,7 +24,6 @@ import traceback
 import inspect
 
 # Local imports
-from resource_management.core.logger import Logger
 
 
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -45,6 +44,8 @@ class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
     self.as_super = super(ZookeeperServiceAdvisor, self)
     self.as_super.__init__(*args, **kwargs)
 
+    self.initialize_logger("ZookeeperServiceAdvisor")
+
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()
     self.modifyHeapSizeProperties()
@@ -105,7 +106,7 @@ class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
     """
     Get a list of errors. Zookeeper does not have any validations in this version.
     """
-    Logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
+    self.logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
     return self.as_super.getServiceComponentLayoutValidations(services, hosts)
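
The "Class: %s, Method: %s" lines above take the method name from inspect.stack()[0][3], the name of the currently executing frame, so the log shows which advisor override actually ran; this is the visible payoff of AMBARI-20617. A self-contained illustration of the idiom:

    import inspect
    import logging

    class Demo(object):
        def __init__(self):
            self.logger = logging.getLogger(self.__class__.__name__)

        def validate(self):
            # inspect.stack()[0][3] is the current function's name: 'validate'
            self.logger.info("Class: %s, Method: %s." %
                             (self.__class__.__name__, inspect.stack()[0][3]))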
 
@@ -113,7 +114,7 @@ class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
     """
     Recommend configurations to set. Zookeeper does not have any recommendations in this version.
     """
-    Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     self.recommendConfigurations(configurations, clusterData, services, hosts)
@@ -122,10 +123,10 @@ class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
     """
     Recommend configurations for this service.
     """
-    Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
-    Logger.info("Setting zoo.cfg to default dataDir to /hadoop/zookeeper on the best matching mount")
+    self.logger.info("Setting zoo.cfg to default dataDir to /hadoop/zookeeper on the best matching mount")
 
     zk_mount_properties = [
       ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
@@ -136,7 +137,7 @@ class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
     """
     Validate configurations for the service. Return a list of errors.
     """
-    Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    self.logger.info("Class: %s, Method: %s. Validating Configurations." %
                 (self.__class__.__name__, inspect.stack()[0][3]))
 
     items = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
index 6ef74d2..8fa6bc3 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
@@ -22,7 +22,6 @@ import re
 from math import ceil
 
 # Local Imports
-from resource_management.core.logger import Logger
 from stack_advisor import DefaultStackAdvisor
 
 class BaseBIGTOP08StackAdvisor(DefaultStackAdvisor):
@@ -30,7 +29,7 @@ class BaseBIGTOP08StackAdvisor(DefaultStackAdvisor):
 
   def __init__(self):
     super(BaseBIGTOP08StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("BaseBIGTOP08StackAdvisor")
 
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 68026d6..5b8cba5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -25,9 +25,9 @@ import socket
 from math import ceil, floor, log
 
 # Local Imports
-from resource_management.core.logger import Logger
 from resource_management.libraries.functions.mounted_dirs_helper import get_mounts_with_multiple_data_dirs
 from resource_management.libraries.functions.data_structure_utils import get_from_dict
+from resource_management.core.logger import Logger
 from stack_advisor import DefaultStackAdvisor
 
 
@@ -35,7 +35,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
   def __init__(self):
     super(HDP206StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("HDP206StackAdvisor")
+    Logger.logger = self.logger
 
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()
@@ -1181,7 +1182,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       yarn_nm_mem_in_mb = float(yarn_site['yarn.nodemanager.resource.memory-mb'])
 
     if yarn_nm_mem_in_mb <= 0.0:
-      Logger.warning("'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(yarn_nm_mem_in_mb))
+      self.logger.warning("'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(yarn_nm_mem_in_mb))
 
     return yarn_nm_mem_in_mb
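
Two details set this hunk apart from the other advisors: the static Logger import is kept (just moved below the other imports), and Logger.logger is pointed at the new instance logger so legacy code that still calls the class-level Logger writes to the same named logger. The body of initialize_logger is not part of this diff; a plausible minimal sketch (an assumption, not the committed base-class code):

    import logging

    def initialize_logger(self, name='service_advisor'):
        # A named logger, so ambari-server.log shows which advisor emitted a line.
        self.logger = logging.getLogger(name)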
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 81c9b72..866d4cb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -21,14 +21,13 @@ limitations under the License.
 import socket
 
 # Local Imports
-from resource_management.core.logger import Logger
 
 
 class HDP21StackAdvisor(HDP206StackAdvisor):
 
   def __init__(self):
     super(HDP21StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("HDP21StackAdvisor")
 
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 800edbe..4d672d2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -28,14 +28,13 @@ import re
 import xml.etree.ElementTree as ET
 
 # Local Imports
-from resource_management.core.logger import Logger
 
 
 class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def __init__(self):
     super(HDP22StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("HDP22StackAdvisor")
 
     self.modifyMastersWithMultipleInstances()
     self.modifyCardinalitiesDict()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9feb5d0f/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 781ff13..67532c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -25,7 +25,6 @@ import math
 import socket
 
 # Local Imports
-from resource_management.core.logger import Logger
 
 
 DB_TYPE_DEFAULT_PORT_MAP = {"MYSQL":"3306", "ORACLE":"1521", "POSTGRES":"5432", "MSSQL":"1433", "SQLA":"2638"}
@@ -34,7 +33,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
   def __init__(self):
     super(HDP23StackAdvisor, self).__init__()
-    Logger.initialize_logger()
+    self.initialize_logger("HDP23StackAdvisor")
 
   def __getHosts(self, componentsList, componentName):
     host_lists = [component["hostnames"] for component in componentsList if

