ambari-commits mailing list archives

From: d...@apache.org
Subject: [1/5] ambari git commit: AMBARI-13037 AMS - stack advisor should recommend topology smarts along with optimistic settings (dsen)
Date: Thu, 10 Sep 2015 19:04:21 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk 621e5c31c -> 1ec86cd83


http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/service-metrics/YARN.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/service-metrics/YARN.txt b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/service-metrics/YARN.txt
deleted file mode 100644
index ce04228..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/service-metrics/YARN.txt
+++ /dev/null
@@ -1,178 +0,0 @@
-jvm.JvmMetrics.GcCount
-jvm.JvmMetrics.GcCountPS
-jvm.JvmMetrics.GcTimeMillis
-jvm.JvmMetrics.GcTimeMillisPS
-jvm.JvmMetrics.LogError
-jvm.JvmMetrics.LogFatal
-jvm.JvmMetrics.LogInfo
-jvm.JvmMetrics.LogWarn
-jvm.JvmMetrics.MemHeapCommittedM
-jvm.JvmMetrics.MemHeapMaxM
-jvm.JvmMetrics.MemHeapUsedM
-jvm.JvmMetrics.MemMaxM
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-jvm.JvmMetrics.ThreadsBlocked
-jvm.JvmMetrics.ThreadsNew
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-jvm.JvmMetrics.ThreadsWaiting
-mapred.ShuffleMetrics.ShuffleConnections
-mapred.ShuffleMetrics.ShuffleOutputBytes
-mapred.ShuffleMetrics.ShuffleOutputsFailed
-mapred.ShuffleMetrics.ShuffleOutputsOK
-metricssystem.MetricsSystem.DroppedPubAll
-metricssystem.MetricsSystem.NumActiveSinks
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-metricssystem.MetricsSystem.NumAllSources
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-metricssystem.MetricsSystem.SnapshotNumOps
-rpc.rpc.CallQueueLength
-rpc.rpc.NumOpenConnections
-rpc.rpc.ReceivedBytes
-rpc.rpc.RpcAuthenticationFailures
-rpc.rpc.RpcAuthenticationSuccesses
-rpc.rpc.RpcAuthorizationFailures
-rpc.rpc.RpcAuthorizationSuccesses
-rpc.rpc.RpcClientBackoff
-rpc.rpc.RpcProcessingTimeAvgTime
-rpc.rpc.RpcProcessingTimeNumOps
-rpc.rpc.RpcQueueTimeAvgTime
-rpc.rpc.RpcQueueTimeNumOps
-rpc.rpc.RpcSlowCalls
-rpc.rpc.SentBytes
-rpcdetailed.rpcdetailed.AllocateAvgTime
-rpcdetailed.rpcdetailed.AllocateNumOps
-rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime
-rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps
-rpcdetailed.rpcdetailed.GetApplicationReportAvgTime
-rpcdetailed.rpcdetailed.GetApplicationReportNumOps
-rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime
-rpcdetailed.rpcdetailed.GetClusterMetricsNumOps
-rpcdetailed.rpcdetailed.GetClusterNodesAvgTime
-rpcdetailed.rpcdetailed.GetClusterNodesNumOps
-rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime
-rpcdetailed.rpcdetailed.GetContainerStatusesNumOps
-rpcdetailed.rpcdetailed.GetNewApplicationAvgTime
-rpcdetailed.rpcdetailed.GetNewApplicationNumOps
-rpcdetailed.rpcdetailed.GetQueueInfoAvgTime
-rpcdetailed.rpcdetailed.GetQueueInfoNumOps
-rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime
-rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps
-rpcdetailed.rpcdetailed.HeartbeatAvgTime
-rpcdetailed.rpcdetailed.HeartbeatNumOps
-rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime
-rpcdetailed.rpcdetailed.NodeHeartbeatNumOps
-rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime
-rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps
-rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime
-rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps
-rpcdetailed.rpcdetailed.StartContainersAvgTime
-rpcdetailed.rpcdetailed.StartContainersNumOps
-rpcdetailed.rpcdetailed.StopContainersAvgTime
-rpcdetailed.rpcdetailed.StopContainersNumOps
-rpcdetailed.rpcdetailed.SubmitApplicationAvgTime
-rpcdetailed.rpcdetailed.SubmitApplicationNumOps
-ugi.UgiMetrics.GetGroupsAvgTime
-ugi.UgiMetrics.GetGroupsNumOps
-ugi.UgiMetrics.LoginFailureAvgTime
-ugi.UgiMetrics.LoginFailureNumOps
-ugi.UgiMetrics.LoginSuccessAvgTime
-ugi.UgiMetrics.LoginSuccessNumOps
-yarn.ClusterMetrics.AMLaunchDelayAvgTime
-yarn.ClusterMetrics.AMLaunchDelayNumOps
-yarn.ClusterMetrics.AMRegisterDelayAvgTime
-yarn.ClusterMetrics.AMRegisterDelayNumOps
-yarn.ClusterMetrics.NumActiveNMs
-yarn.ClusterMetrics.NumDecommissionedNMs
-yarn.ClusterMetrics.NumLostNMs
-yarn.ClusterMetrics.NumRebootedNMs
-yarn.ClusterMetrics.NumUnhealthyNMs
-yarn.NodeManagerMetrics.AllocatedContainers
-yarn.NodeManagerMetrics.AllocatedGB
-yarn.NodeManagerMetrics.AllocatedVCores
-yarn.NodeManagerMetrics.AvailableGB
-yarn.NodeManagerMetrics.AvailableVCores
-yarn.NodeManagerMetrics.BadLocalDirs
-yarn.NodeManagerMetrics.BadLogDirs
-yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime
-yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps
-yarn.NodeManagerMetrics.ContainersCompleted
-yarn.NodeManagerMetrics.ContainersFailed
-yarn.NodeManagerMetrics.ContainersIniting
-yarn.NodeManagerMetrics.ContainersKilled
-yarn.NodeManagerMetrics.ContainersLaunched
-yarn.NodeManagerMetrics.ContainersRunning
-yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc
-yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc
-yarn.QueueMetrics.Queue=root.AMResourceLimitMB
-yarn.QueueMetrics.Queue=root.AMResourceLimitVCores
-yarn.QueueMetrics.Queue=root.ActiveApplications
-yarn.QueueMetrics.Queue=root.ActiveUsers
-yarn.QueueMetrics.Queue=root.AggregateContainersAllocated
-yarn.QueueMetrics.Queue=root.AggregateContainersReleased
-yarn.QueueMetrics.Queue=root.AllocatedContainers
-yarn.QueueMetrics.Queue=root.AllocatedMB
-yarn.QueueMetrics.Queue=root.AllocatedVCores
-yarn.QueueMetrics.Queue=root.AppAttemptFirstContainerAllocationDelayAvgTime
-yarn.QueueMetrics.Queue=root.AppAttemptFirstContainerAllocationDelayNumOps
-yarn.QueueMetrics.Queue=root.AppsCompleted
-yarn.QueueMetrics.Queue=root.AppsFailed
-yarn.QueueMetrics.Queue=root.AppsKilled
-yarn.QueueMetrics.Queue=root.AppsPending
-yarn.QueueMetrics.Queue=root.AppsRunning
-yarn.QueueMetrics.Queue=root.AppsSubmitted
-yarn.QueueMetrics.Queue=root.AvailableMB
-yarn.QueueMetrics.Queue=root.AvailableVCores
-yarn.QueueMetrics.Queue=root.PendingContainers
-yarn.QueueMetrics.Queue=root.PendingMB
-yarn.QueueMetrics.Queue=root.PendingVCores
-yarn.QueueMetrics.Queue=root.ReservedContainers
-yarn.QueueMetrics.Queue=root.ReservedMB
-yarn.QueueMetrics.Queue=root.ReservedVCores
-yarn.QueueMetrics.Queue=root.UsedAMResourceMB
-yarn.QueueMetrics.Queue=root.UsedAMResourceVCores
-yarn.QueueMetrics.Queue=root.default.AMResourceLimitMB
-yarn.QueueMetrics.Queue=root.default.AMResourceLimitVCores
-yarn.QueueMetrics.Queue=root.default.ActiveApplications
-yarn.QueueMetrics.Queue=root.default.ActiveUsers
-yarn.QueueMetrics.Queue=root.default.AggregateContainersAllocated
-yarn.QueueMetrics.Queue=root.default.AggregateContainersReleased
-yarn.QueueMetrics.Queue=root.default.AllocatedContainers
-yarn.QueueMetrics.Queue=root.default.AllocatedMB
-yarn.QueueMetrics.Queue=root.default.AllocatedVCores
-yarn.QueueMetrics.Queue=root.default.AppAttemptFirstContainerAllocationDelayAvgTime
-yarn.QueueMetrics.Queue=root.default.AppAttemptFirstContainerAllocationDelayNumOps
-yarn.QueueMetrics.Queue=root.default.AppsCompleted
-yarn.QueueMetrics.Queue=root.default.AppsFailed
-yarn.QueueMetrics.Queue=root.default.AppsKilled
-yarn.QueueMetrics.Queue=root.default.AppsPending
-yarn.QueueMetrics.Queue=root.default.AppsRunning
-yarn.QueueMetrics.Queue=root.default.AppsSubmitted
-yarn.QueueMetrics.Queue=root.default.AvailableMB
-yarn.QueueMetrics.Queue=root.default.AvailableVCores
-yarn.QueueMetrics.Queue=root.default.PendingContainers
-yarn.QueueMetrics.Queue=root.default.PendingMB
-yarn.QueueMetrics.Queue=root.default.PendingVCores
-yarn.QueueMetrics.Queue=root.default.ReservedContainers
-yarn.QueueMetrics.Queue=root.default.ReservedMB
-yarn.QueueMetrics.Queue=root.default.ReservedVCores
-yarn.QueueMetrics.Queue=root.default.UsedAMResourceMB
-yarn.QueueMetrics.Queue=root.default.UsedAMResourceVCores
-yarn.QueueMetrics.Queue=root.default.running_0
-yarn.QueueMetrics.Queue=root.default.running_1440
-yarn.QueueMetrics.Queue=root.default.running_300
-yarn.QueueMetrics.Queue=root.default.running_60
-yarn.QueueMetrics.Queue=root.running_0
-yarn.QueueMetrics.Queue=root.running_1440
-yarn.QueueMetrics.Queue=root.running_300
-yarn.QueueMetrics.Queue=root.running_60

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/split_points.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/split_points.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/split_points.py
deleted file mode 100644
index 910bde3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/AMBARI_METRICS/split_points.py
+++ /dev/null
@@ -1,210 +0,0 @@
-# !/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import sys
-import re
-import math
-import collections
-import ast
-
-metric_filename_ext = '.txt'
-# 5 regions for higher order aggregate tables
-other_region_static_count = 5
-
-b_bytes = 1
-k_bytes = 1 << 10  # 1024
-m_bytes = 1 << 20  # 1024^2
-g_bytes = 1 << 30  # 1024^3
-t_bytes = 1 << 40  # 1024^4
-p_bytes = 1 << 50  # 1024^5
-
-def to_number(s):
-  try:
-    return int(re.sub("\D", "", s))
-  except ValueError:
-    return None
-
-def format_Xmx_size_to_bytes(value):
-  strvalue = str(value).lower()
-  if len(strvalue) == 0:
-    return 0
-  modifier = strvalue[-1]
-
-  if modifier == ' ' or modifier in "0123456789":
-    modifier = 'b'
-
-  m = {
-    modifier == 'b': b_bytes,
-    modifier == 'k': k_bytes,
-    modifier == 'm': m_bytes,
-    modifier == 'g': g_bytes,
-    modifier == 't': t_bytes,
-    modifier == 'p': p_bytes
-    } [1]
-  return to_number(strvalue) * m
-
-# Class that takes AMS HBase configs as input and determines the Region
-# pre-splits based on selected services also passed as a parameter to the class.
-class FindSplitPointsForAMSRegions():
-
-  def __init__(self, ams_hbase_site, ams_hbase_env, serviceMetricsDir,
-               operation_mode = 'embedded', services = None):
-    self.ams_hbase_site = ams_hbase_site
-    self.ams_hbase_env = ams_hbase_env
-    self.serviceMetricsDir = serviceMetricsDir
-    self.services = services
-    self.mode = operation_mode
-    # Initialize before user
-    self.initialize()
-
-  def initialize(self):
-    # calculate regions based on available memory
-    self.initialize_region_counts()
-    self.initialize_ordered_set_of_metrics()
-
-  def initialize_region_counts(self):
-    try:
-      xmx_master_bytes = format_Xmx_size_to_bytes(self.ams_hbase_env['hbase_master_heapsize'])
-      xmx_region_bytes = format_Xmx_size_to_bytes(self.ams_hbase_env['hbase_regionserver_heapsize'])
-      xmx_bytes = xmx_master_bytes + xmx_region_bytes
-      if self.mode == 'distributed':
-        xmx_bytes = xmx_region_bytes
-
-      memstore_max_mem = float(self.ams_hbase_site['hbase.regionserver.global.memstore.upperLimit']) * xmx_bytes
-      memstore_flush_size = format_Xmx_size_to_bytes(self.ams_hbase_site['hbase.hregion.memstore.flush.size'])
-
-      max_inmemory_regions = (memstore_max_mem / memstore_flush_size) - other_region_static_count
-      print 'max_inmemory_regions: %s' % max_inmemory_regions
-
-      if max_inmemory_regions > 2:
-        # Lets say total = 12, so we have 7 regions to allocate between
-        # METRIC_RECORD and METRIC_AGGREGATE tables, desired = (5, 2)
-        self.desired_precision_region_count = int(math.floor(0.8 * max_inmemory_regions))
-        self.desired_aggregate_region_count = int(max_inmemory_regions - self.desired_precision_region_count)
-      else:
-        self.desired_precision_region_count = 1
-        self.desired_aggregate_region_count = 1
-
-    except:
-      print('Bad config settings, could not calculate max regions available.')
-    pass
-
-  def initialize_ordered_set_of_metrics(self):
-    onlyServicefiles = [ f for f in os.listdir(self.serviceMetricsDir) if
-                  os.path.isfile(os.path.join(self.serviceMetricsDir, f)) ]
-
-    metrics = set()
-
-    for file in onlyServicefiles:
-      # Process for services selected at deploy time or all services if
-      # services arg is not passed
-      if self.services is None or file.rstrip(metric_filename_ext) in self.services:
-        print 'Processing file: %s' % os.path.join(self.serviceMetricsDir, file)
-        with open(os.path.join(self.serviceMetricsDir, file), 'r') as f:
-          for metric in f:
-            metrics.add(metric.strip())
-      pass
-    pass
-
-    self.metrics = sorted(metrics)
-    print 'metrics length: %s' % len(self.metrics)
-
-  def get_split_points(self):
-    split_points = collections.namedtuple('SplitPoints', [ 'precision', 'aggregate' ])
-    split_points.precision = []
-    split_points.aggregate = []
-
-    metric_list = list(self.metrics)
-    metrics_total = len(metric_list)
-
-    print 'desired_precision_region_count: %s' % self.desired_precision_region_count
-    print 'desired_aggregate_region_count: %s' % self.desired_aggregate_region_count
-
-    if self.desired_precision_region_count > 1:
-      idx = int(math.ceil(metrics_total / self.desired_precision_region_count))
-      index = idx
-      for i in range(0, self.desired_precision_region_count - 1):
-        if index < metrics_total - 1:
-          split_points.precision.append(metric_list[index])
-          index += idx
-
-    if self.desired_aggregate_region_count > 1:
-      idx = int(math.ceil(metrics_total / self.desired_aggregate_region_count))
-      index = idx
-      for i in range(0, self.desired_aggregate_region_count - 1):
-        if index < metrics_total - 1:
-          split_points.aggregate.append(metric_list[index])
-          index += idx
-
-    return split_points
-  pass
-
-def main(argv = None):
-  scriptDir = os.path.realpath(os.path.dirname(argv[0]))
-  serviceMetricsDir = os.path.join(scriptDir, 'service-metrics')
-  if os.path.exists(serviceMetricsDir):
-    onlyargs = argv[1:]
-    if len(onlyargs) < 3:
-      sys.stderr.write("Usage: dict(ams-hbase-site) dict(ams-hbase-env) list(services)\n")
-      sys.exit(2)
-    pass
-
-    ams_hbase_site = None
-    ams_hbase_env = None
-    services = None
-    try:
-      ams_hbase_site = ast.literal_eval(str(onlyargs[0]))
-      ams_hbase_env = ast.literal_eval(str(onlyargs[1]))
-      services = onlyargs[2]
-      if services:
-        services = str(services).split(',')
-      pass
-    except Exception, ex:
-      sys.stderr.write(str(ex))
-      sys.stderr.write("\nUsage: Expected items not found in input. Found "
-                      " ams-hbase-site => {0}, ams-hbase-env => {1},"
-                      " services => {2}".format(ams_hbase_site, ams_hbase_env, services))
-      sys.exit(2)
-
-    print '--------- AMS Regions Split point finder ---------'
-    print 'Services: %s' % services
-
-    mode = 'distributed' if 'hbase.rootdir' in ams_hbase_site and \
-                            'hdfs' in ams_hbase_site['hbase.rootdir'] else \
-                            'embedded'
-
-    split_point_finder = FindSplitPointsForAMSRegions(
-      ams_hbase_site, ams_hbase_env, serviceMetricsDir, mode, services)
-
-    result = split_point_finder.get_split_points()
-    print 'Split points for precision table : %s' % len(result.precision)
-    print 'precision: %s' % str(result.precision)
-    print 'Split points for aggregate table : %s' % len(result.aggregate)
-    print 'aggregate: %s' % str(result.aggregate)
-
-    return 0
-
-  else:
-    print 'Cannot find service metrics dir in %s' % scriptDir
-
-if __name__ == '__main__':
-  main(sys.argv)
-

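For context, the deleted split_points.py above boils down to three steps: parse the Xmx-style heap sizes from ams-hbase-env, estimate how many regions fit in the memstore, and pick evenly spaced metric names from the sorted metric list as HBase region pre-split points. A minimal sketch of that flow, assuming illustrative heap sizes and a toy metric list (none of these values come from a real cluster):

import math
import re

def format_xmx_size_to_bytes(value):
    # Accepts strings such as "1024m" or "2g" and returns bytes.
    units = {'b': 1, 'k': 1 << 10, 'm': 1 << 20, 'g': 1 << 30}
    s = str(value).lower().strip()
    unit = s[-1] if s[-1] in units else 'b'
    return int(re.sub(r"\D", "", s)) * units[unit]

# Illustrative inputs
xmx_bytes = format_xmx_size_to_bytes("4096m") + format_xmx_size_to_bytes("4096m")
memstore_max = 0.35 * xmx_bytes                        # upperLimit * total heap
flush_size = format_xmx_size_to_bytes("128m")
max_inmemory_regions = memstore_max / flush_size - 5   # 5 regions reserved for aggregate tables
precision_regions = max(1, int(math.floor(0.8 * max_inmemory_regions)))

# Split points are metric names taken at evenly spaced indexes of the sorted list
metrics = sorted(["jvm.JvmMetrics.GcCount", "rpc.rpc.SentBytes",
                  "yarn.ClusterMetrics.NumActiveNMs", "yarn.NodeManagerMetrics.AllocatedGB"])
step = max(1, int(math.ceil(len(metrics) / float(precision_regions))))
split_points = [metrics[i] for i in range(step, len(metrics) - 1, step)]
print(split_points)
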
http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index cce0fc5..28dd007 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.
 
 import re
 import os
+import sys
 from math import ceil
 
 from stack_advisor import DefaultStackAdvisor
@@ -235,6 +236,55 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:
       putHbaseSiteProperty("hbase.superuser", services['configurations']['hbase-env']['properties']['hbase_user'])
 
+  def getAmsMemoryRecommendation(self, services, hosts):
+    # MB per sink in hbase heapsize
+    HEAP_PER_MASTER_COMPONENT = 50
+    HEAP_PER_SLAVE_COMPONENT = 10
+
+    schMemoryMap = {
+      "HDFS": {
+        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "DATANODE": HEAP_PER_SLAVE_COMPONENT
+      },
+      "YARN": {
+        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
+      },
+      "HBASE": {
+        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "ACCUMULO": {
+        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
+      },
+      "FLUME": {
+        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "STORM": {
+        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
+      },
+      "AMBARI_METRICS": {
+        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
+        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
+      }
+    }
+    total_sinks_count = 0
+    # minimum heap size
+    hbase_heapsize = 500
+    for serviceName, componentsDict in schMemoryMap.items():
+      for componentName, multiplier in componentsDict.items():
+        schCount = len(
+          self.getHostsWithComponent(serviceName, componentName, services,
+                                     hosts))
+        hbase_heapsize += int((schCount * multiplier) ** 0.9)
+        total_sinks_count += schCount
+    collector_heapsize = int(hbase_heapsize/4 if hbase_heapsize > 2048 else 512)
+
+    return collector_heapsize, hbase_heapsize, total_sinks_count
+
   def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
     putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
     putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
@@ -242,11 +292,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
 
     amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
-    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
-    putTimelineServiceProperty("timeline.metrics.host.aggregator.ttl", 86400)
 
     rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
     tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
@@ -256,16 +301,46 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
         tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
 
-    # TODO recommend configuration for multiple AMBARI_METRICS collectors
+    mountpoints = ["/"]
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          mountpoints = self.getPreferredMountPoints(host["Hosts"])
+          break
+    if not rootDir.startswith("hdfs://"):
+      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
+      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
+    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
+    if len(mountpoints) > 1 and not rootDir.startswith("hdfs://"):
+      tmpDir = os.path.join(mountpoints[1], tmpDir)
+    else:
+      tmpDir = os.path.join(mountpoints[0], tmpDir)
+    putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
+    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
+
+    putAmsEnvProperty("metrics_collector_heapsize", str(collector_heapsize) + "m")
+
+    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
+    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
+    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
+    putTimelineServiceProperty("timeline.metrics.host.aggregator.ttl", 86400)
+
+    # Embedded mode heap size : master + regionserver
+    if rootDir.startswith("hdfs://"):
+      putHbaseEnvProperty("hbase_master_heapsize", "512m")
+      putHbaseEnvProperty("hbase_regionserver_heapsize", str(hbase_heapsize) + "m")
+    else:
+      putHbaseEnvProperty("hbase_master_heapsize", str(hbase_heapsize) + "m")
+
     if len(amsCollectorHosts) > 1:
       pass
     else:
-      totalHostsCount = len(hosts["items"])
       # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
-      putHbaseEnvProperty("hbase_master_heapsize", "512m")
-      if totalHostsCount >= 400:
-        hbase_heapsize = "12288m"
-        putAmsEnvProperty("metrics_collector_heapsize", "8192m")
+      if total_sinks_count >= 2000:
         putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
         putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
         putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
@@ -276,49 +351,38 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
         putTimelineServiceProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
         putAmsHbaseSiteProperty("hbase_master_xmn_size", "512m")
         putAmsHbaseSiteProperty("regionserver_xmn_size", "512m")
-      elif totalHostsCount >= 100:
-        hbase_heapsize = "6144m"
-        putAmsEnvProperty("metrics_collector_heapsize", "4096m")
+      elif total_sinks_count >= 500:
         putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
         putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
         putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
         putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
         putAmsHbaseSiteProperty("hbase_master_xmn_size", "512m")
-      elif totalHostsCount >= 50:
-        hbase_heapsize = "2048m"
-        putAmsEnvProperty("metrics_collector_heapsize", "2048m")
+      elif total_sinks_count >= 250:
         putAmsHbaseSiteProperty("hbase_master_xmn_size", "256m")
       else:
-        # Embedded mode heap size : master + regionserver
-        hbase_heapsize = "512m"
-        putAmsEnvProperty("metrics_collector_heapsize", "512m")
         putAmsHbaseSiteProperty("hbase_master_xmn_size", "128m")
       pass
 
-      if rootDir.startswith("hdfs://"):
-        putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
-      else:
-        putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
+    #split points
+    scriptDir = os.path.dirname(os.path.abspath(__file__))
+    metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
+    serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
+    sys.path.append(os.path.join(metricsDir, 'scripts'))
+    mode = 'distributed' if rootDir.startswith("hdfs://") else 'embedded'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
 
-    mountpoint = "/"
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          mountpoint = self.getProperMountPoint(host["Hosts"])
-          break
-    if not rootDir.startswith("hdfs://") :
-      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
-      rootDir = "file://" + os.path.join(mountpoint, rootDir)
-    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
-    tmpDir = os.path.join(mountpoint, tmpDir)
+    from split_points import FindSplitPointsForAMSRegions
+    split_point_finder = FindSplitPointsForAMSRegions(
+      configurations["ams-hbase-site"]["properties"],
+      configurations["ams-hbase-env"]["properties"],
+      serviceMetricsDir, mode, servicesList)
 
-    putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
-    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
+    result = split_point_finder.get_split_points()
+    putTimelineServiceProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(result.precision))
+    putTimelineServiceProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(result.aggregate))
 
     pass
 
-
-
   def getHostsWithComponent(self, serviceName, componentName, services, hosts):
     if services is not None and hosts is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
       service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
@@ -523,28 +587,20 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
     ams_site = getSiteProperties(configurations, "ams-site")
 
+    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
     recommendedDiskSpace = 10485760
     # TODO validate configuration for multiple AMBARI_METRICS collectors
     if len(amsCollectorHosts) > 1:
       pass
     else:
-      totalHostsCount = len(hosts["items"])
-      if totalHostsCount > 400:
+      if total_sinks_count > 2000:
         recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
-      elif totalHostsCount > 100:
+      elif total_sinks_count > 500:
         recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
-      elif totalHostsCount > 50:
+      elif total_sinks_count > 250:
         recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
 
-
     validationItems = []
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          validationItems.extend([ {"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
-          validationItems.extend([ {"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, 'hbase.rootdir', host["Hosts"])}])
-          validationItems.extend([ {"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, 'hbase.tmp.dir', host["Hosts"])}])
-          break
 
     rootdir_item = None
     op_mode = ams_site.get("timeline.metrics.service.operation.mode")
@@ -561,14 +617,45 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
                             {"config-name":'hbase.cluster.distributed', "item": distributed_item }])
 
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
+          validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, 'hbase.rootdir', host["Hosts"])}])
+          validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, 'hbase.tmp.dir', host["Hosts"])}])
+
+          # if METRICS_COLLECTOR is co-hosted with DATANODE
+          if not hbase_rootdir.startswith("hdfs") and \
+              collectorHostName in self.getComponentHostNames(services, "HDFS", "DATANODE"):
+            # cross-check dfs.datanode.data.dir and hbase.rootdir
+            # they shouldn't share same disk partition IO
+            hdfs_site = getSiteProperties(configurations, "hdfs-site")
+            mountPoints = []
+            for mountPoint in host["Hosts"]["disk_info"]:
+              mountPoints.append(mountPoint["mountpoint"])
+            hbase_rootdir_mountpoint = getMountPointForDir(hbase_rootdir, mountPoints)
+            if ams_site and hdfs_site and "dfs.datanode.data.dir" in hdfs_site:
+              for dfs_datadir in hdfs_site.get("dfs.datanode.data.dir").split(","):
+                mountPoints = []
+                for mountPoint in host["Hosts"]["disk_info"]:
+                  mountPoints.append(mountPoint["mountpoint"])
+                dfs_datadir_mountpoint = getMountPointForDir(dfs_datadir, mountPoints)
+                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
+                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
+                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
+                  validationItems.extend([{"config-name":'hbase.rootdir', "item": item}])
+                  break
+
     return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
 
   def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize")
+    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
     masterItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
     ams_env = getSiteProperties(configurations, "ams-env")
+    amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
     logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir",
                                                   ams_env, "metrics_collector_log_dir")
+    validationItems = []
     masterHostItem = None
 
     if masterItem is None:
@@ -599,6 +686,27 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
               masterHostItem = self.getWarnItem(
                 masterHostMessage.format(
                   collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
+
+          # Check for unused RAM on AMS Collector node
+          hostComponents = []
+          for service in services["services"]:
+            for component in service["components"]:
+              if component["StackServiceComponents"]["hostnames"] is not None:
+                if collectorHostName in component["StackServiceComponents"]["hostnames"]:
+                  hostComponents.append(component["StackServiceComponents"]["component_name"])
+
+          requiredMemory = getMemorySizeRequired(hostComponents, configurations)
+          unusedMemory = host["Hosts"]["total_mem"] * 1024 - requiredMemory # in bytes
+          hbase_rootdir = amsHbaseSite.get("hbase.rootdir")
+          if unusedMemory > 4294967296:  # warn user, if more than 4GB RAM is unused
+            propertyToIncrease = "hbase_regionserver_heapsize" if hbase_rootdir.startswith("hdfs://") else "hbase_master_heapsize"
+            collector_heapsize = int((unusedMemory - 4294967296)/5) + to_number(ams_env.get("metrics_collector_heapsize"))*1048576
+            hbase_heapsize = int((unusedMemory - 4294967296)*4/5) + to_number(properties.get(propertyToIncrease))*1048576
+            msg = "{0} MB RAM is unused on the host {1} based on components " \
+                  "assigned. Consider allocating  {2} MB to metrics_collector_heapsize in ams-env and {3} MB to {4} in ams-hbase-env"
+
+            unusedMemoryHbaseItem = self.getWarnItem(msg.format(unusedMemory/1048576, collectorHostName, collector_heapsize/1048576, hbase_heapsize/1048576, propertyToIncrease))
+            validationItems.extend([{"config-name": propertyToIncrease, "item": unusedMemoryHbaseItem}])
       pass
 
     # Check RS memory in distributed mode since we set default as 512m
@@ -608,11 +716,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     if hbase_rootdir and hbase_rootdir.startswith("hdfs://"):
       regionServerMinMemItem = self.validateMinMemorySetting(properties, 1024, 'hbase_regionserver_heapsize')
 
-    validationItems = [{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem},
-                       {"config-name": "hbase_regionserver_heapsize", "item": regionServerMinMemItem},
-                       {"config-name": "hbase_master_heapsize", "item": masterItem},
-                       {"config-name": "hbase_master_heapsize", "item": masterHostItem},
-                       {"config-name": "hbase_log_dir", "item": logDirItem}]
+    validationItems.extend([
+      {"config-name": "hbase_regionserver_heapsize", "item": regionServerItem},
+      {"config-name": "hbase_regionserver_heapsize", "item": regionServerMinMemItem},
+      {"config-name": "hbase_master_heapsize", "item": masterItem},
+      {"config-name": "hbase_master_heapsize", "item": masterHostItem},
+      {"config-name": "hbase_log_dir", "item": logDirItem}
+    ])
     return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
 
 
@@ -635,23 +745,25 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
   def getErrorItem(self, message):
     return {"level": "ERROR", "message": message}
 
-  def getProperMountPoint(self, hostInfo):
+  def getPreferredMountPoints(self, hostInfo):
 
     # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
     undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
                               "/etc/hostname"]
     undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
     if hostInfo and "disk_info" in hostInfo:
-      mountPoints = {}
+      mountPointsDict = {}
       for mountpoint in hostInfo["disk_info"]:
         if not (mountpoint["mountpoint"] in undesirableMountPoints or
                 mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
                 mountpoint["type"] in undesirableFsTypes or
                 mountpoint["available"] == str(0)):
-          mountPoints[mountpoint["mountpoint"]] = to_number(mountpoint["available"])
-      if mountPoints:
-        return max(mountPoints, key=mountPoints.get)
-    return "/"
+          mountPointsDict[mountpoint["mountpoint"]] = to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
+    return mountPoints
 
   def validatorNotRootFs(self, properties, propertyName, hostInfo):
     if not propertyName in properties:
@@ -666,8 +778,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       mountPoints.append(mountPoint["mountpoint"])
     mountPoint = getMountPointForDir(dir, mountPoints)
 
-    if "/" == mountPoint and self.getProperMountPoint(hostInfo) != mountPoint:
-      return self.getWarnItem("The root device should not be used for {0}".format(propertyName))
+    if "/" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint:
+      return self.getWarnItem("It is not recommended to use root partition for {0}".format(propertyName))
 
     return None
 
@@ -809,7 +921,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
 
   def getNotPreferableOnServerComponents(self):
-    return ['GANGLIA_SERVER']
+    return ['GANGLIA_SERVER', 'METRICS_COLLECTOR']
 
   def getCardinalitiesDict(self):
     return {
@@ -831,8 +943,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
       'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
       'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
-      'METRICS_COLLECTOR': {"else": 2},
-      }
+      'METRICS_COLLECTOR': {3: 2, 6: 2, 31: 3, "else": 5},
+    }
 
   def get_system_min_uid(self):
     login_defs = '/etc/login.defs'
@@ -957,7 +1069,7 @@ def getMountPointForDir(dir, mountPoints):
   """
   bestMountFound = None
   if dir:
-    dir = dir.strip().lower()
+    dir = re.sub("^file:///", "", dir, count=1).strip().lower()
 
     # If the path is "/hadoop/hdfs/data", then possible matches for mounts could be
     # "/", "/hadoop/hdfs", and "/hadoop/hdfs/data".

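The getAmsMemoryRecommendation method introduced above replaces the old host-count thresholds with a sink-count model: each mapped master-type component contributes 50 MB per instance and each slave-type component 10 MB, damped by a 0.9 exponent, on top of a 500 MB floor; the collector heap stays at 512 MB until the HBase heap exceeds 2048 MB. A standalone sketch of that formula with made-up component counts (the real method derives the counts via getHostsWithComponent):

def ams_heap_recommendation(component_counts):
    # component_counts: {component_name: (instance_count, heap_mb_per_sink)}
    hbase_heapsize = 500  # minimum heap size, MB
    total_sinks = 0
    for count, per_sink_mb in component_counts.values():
        hbase_heapsize += int((count * per_sink_mb) ** 0.9)  # sub-linear growth per component type
        total_sinks += count
    collector_heapsize = int(hbase_heapsize / 4) if hbase_heapsize > 2048 else 512
    return collector_heapsize, hbase_heapsize, total_sinks

# Hypothetical 100-node cluster running only AMS: 1 collector + 100 monitors
print(ams_heap_recommendation({
    "METRICS_COLLECTOR": (1, 50),   # master-type sink
    "METRICS_MONITOR": (100, 10),   # slave-type sink
}))  # -> (512, 1034, 101)
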
http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 2bb5aad..4a06c1e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -74,7 +74,7 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
 
 
   def getNotPreferableOnServerComponents(self):
-    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER']
+    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER', 'METRICS_COLLECTOR']
 
   def getNotValuableComponents(self):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index f09488f..3f15c5f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -1177,11 +1177,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     result.extend(['METRICS_MONITOR'])
     return result
 
-  def getNotPreferableOnServerComponents(self):
-    result = super(HDP22StackAdvisor, self).getNotPreferableOnServerComponents()
-    result.extend(['METRICS_COLLECTOR'])
-    return result
-
   def getCardinalitiesDict(self):
     result = super(HDP22StackAdvisor, self).getCardinalitiesDict()
     result['METRICS_COLLECTOR'] = {"min": 1}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 60ca33d..8b3bee2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1083,11 +1083,11 @@ class TestHDP206StackAdvisor(TestCase):
 
   def test_getProperMountPoint(self):
     hostInfo = None
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     hostInfo = {"some_key": []}
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     hostInfo["disk_info"] = []
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # root mountpoint with low space available
     hostInfo["disk_info"].append(
       {
@@ -1096,7 +1096,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/"
       }
     )
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # tmpfs with more space available
     hostInfo["disk_info"].append(
       {
@@ -1105,7 +1105,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/dev/shm"
       }
     )
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # /boot with more space available
     hostInfo["disk_info"].append(
       {
@@ -1114,7 +1114,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/boot/grub"
       }
     )
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # /boot with more space available
     hostInfo["disk_info"].append(
       {
@@ -1123,7 +1123,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/mnt/external_hdd"
       }
     )
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # virtualbox fs with more space available
     hostInfo["disk_info"].append(
       {
@@ -1132,7 +1132,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/vagrant"
       }
     )
-    self.assertEquals("/", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # proper mountpoint with more space available
     hostInfo["disk_info"].append(
       {
@@ -1141,7 +1141,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/grid/0"
       }
     )
-    self.assertEquals("/grid/0", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
     # proper mountpoint with more space available
     hostInfo["disk_info"].append(
       {
@@ -1150,7 +1150,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/grid/1"
       }
     )
-    self.assertEquals("/grid/1", self.stackAdvisor.getProperMountPoint(hostInfo))
+    self.assertEquals(["/grid/1", "/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
 
   def test_validateNonRootFs(self):
     hostInfo = {"disk_info": [
@@ -1171,9 +1171,10 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/grid/0"
       }
     )
+
     warn = self.stackAdvisor.validatorNotRootFs(properties, 'property1', hostInfo)
     self.assertFalse(warn == None)
-    self.assertEquals({'message': 'The root device should not be used for property1', 'level': 'WARN'}, warn)
+    self.assertEquals({'message': 'It is not recommended to use root partition for property1', 'level': 'WARN'}, warn)
 
     # Set by user /var mountpoint, which is non-root , but not preferable - no warning
     hostInfo["disk_info"].append(

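The updated tests above reflect the rename of getProperMountPoint to getPreferredMountPoints: instead of a single best mount point, the advisor now returns every acceptable mount point ordered by available space, with "/" always appended as the fallback. A small sketch of that ordering, using hypothetical disk_info entries shaped like the test fixtures:

def preferred_mount_points(disk_info):
    # Skip undesirable mount points and fs types, then sort by free space (descending).
    undesirable_mounts = ("/", "/home", "/etc/resolv.conf", "/etc/hosts", "/etc/hostname")
    undesirable_fs = ("devtmpfs", "tmpfs", "vboxsf", "CDFS")
    usable = {}
    for disk in disk_info:
        if (disk["mountpoint"] not in undesirable_mounts
                and not disk["mountpoint"].startswith(("/boot", "/mnt"))
                and disk["type"] not in undesirable_fs
                and disk["available"] != "0"):
            usable[disk["mountpoint"]] = int(disk["available"])
    mounts = sorted(usable, key=usable.get, reverse=True)
    mounts.append("/")  # root stays available as the last resort
    return mounts

print(preferred_mount_points([
    {"mountpoint": "/",        "type": "ext4",  "available": "1024"},
    {"mountpoint": "/grid/0",  "type": "ext4",  "available": "500000"},
    {"mountpoint": "/grid/1",  "type": "ext4",  "available": "900000"},
    {"mountpoint": "/dev/shm", "type": "tmpfs", "available": "800000"},
]))  # -> ['/grid/1', '/grid/0', '/']
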
http://git-wip-us.apache.org/repos/asf/ambari/blob/1ec86cd8/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 7443b51..6ce40ea 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -1938,12 +1938,19 @@ class TestHDP22StackAdvisor(TestCase):
       "services":  [ {
         "StackServices": {
           "service_name": "AMBARI_METRICS"
-        },"components": [{
+        },
+        "components": [{
           "StackServiceComponents": {
             "component_name": "METRICS_COLLECTOR",
             "hostnames": ["host1"]
           }
 
+        }, {
+          "StackServiceComponents": {
+            "component_name": "METRICS_MONITOR",
+            "hostnames": ["host1"]
+          }
+
         }]
       }],
       "configurations": []
@@ -1957,10 +1964,11 @@ class TestHDP22StackAdvisor(TestCase):
       }]
     }
 
+    # 1-node cluster
     expected = {
       "ams-hbase-env": {
         "properties": {
-          "hbase_master_heapsize": "512m"
+          "hbase_master_heapsize": "540m"
           }
       },
       "ams-env": {
@@ -1972,6 +1980,7 @@ class TestHDP22StackAdvisor(TestCase):
         "properties": {
           "hbase.regionserver.global.memstore.lowerLimit": "0.3",
           "hbase.regionserver.global.memstore.upperLimit": "0.35",
+          "hbase.hregion.memstore.flush.size": "134217728",
           "hfile.block.cache.size": "0.3",
           "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase",
           "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
@@ -1980,13 +1989,118 @@ class TestHDP22StackAdvisor(TestCase):
       },
       "ams-site": {
         "properties": {
+          "timeline.metrics.cluster.aggregate.splitpoints": "",
+          "timeline.metrics.host.aggregate.splitpoints": "",
           "timeline.metrics.host.aggregator.ttl": "86400"
         }
       }
     }
     self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
-    
+
+    # 100-nodes cluster, but still only 1 sink (METRICS_COLLECTOR)
+    for i in range(2, 101):
+      hosts['items'].extend([{
+        "Hosts": {
+          "host_name": "host" + str(i)
+          }
+      }])
+
+    services['services'] = [
+      {
+        "StackServices": {
+          "service_name": "AMBARI_METRICS"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "METRICS_COLLECTOR",
+              "hostnames": ["host1"]
+            }
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "METRICS_MONITOR",
+              "hostnames": ["host" + str(i) for i in range(1, 101)]
+            }
+          }
+        ]
+      }
+    ]
+    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '1034m'
+    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '512m'
+
+    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+    # Still 100 nodes, but with HDFS and YARN services installed on all nodes
+    services['services'] = [
+      {
+        "StackServices": {
+          "service_name": "HDFS"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NAMENODE",
+              "hostnames": ["host1"]
+            }
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DATANODE",
+              "hostnames": ["host" + str(i) for i in range(1, 101)]
+            }
+          }
+        ]
+      },
+      {
+        "StackServices": {
+          "service_name": "YARN"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "RESOURCEMANAGER",
+              "hostnames": ["host1"]
+            }
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["host" + str(i) for i in range(1, 101)]
+            }
+          }
+        ]
+      },
+      {
+        "StackServices": {
+          "service_name": "AMBARI_METRICS"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "METRICS_COLLECTOR",
+              "hostnames": ["host1"]
+            }
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "METRICS_MONITOR",
+              "hostnames": ["host" + str(i) for i in range(1, 101)]
+            }
+          }
+        ]
+      }
+
+    ]
+    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '1601m'
+    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '512m'
+    # expected["ams-hbase-site"]['properties']['hbase_master_xmn_size'] = '256m'
+
+    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
   def test_recommendHbaseConfigurations(self):
     servicesList = ["HBASE"]
     configurations = {}


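The expected heap sizes in the updated 2.2 test follow directly from the sink-based formula added to the 2.0.6 advisor (a 500 MB floor plus int((count * MB_per_sink) ** 0.9) per mapped component type); the arithmetic below reproduces all three expected hbase_master_heapsize values, and since every total stays below the 2048 MB threshold, metrics_collector_heapsize remains "512m" throughout:

# 1 node: 1 METRICS_COLLECTOR (50 MB) + 1 METRICS_MONITOR (10 MB)
print(500 + int((1 * 50) ** 0.9) + int((1 * 10) ** 0.9))            # 540  -> "540m"

# 100 nodes, AMS only: 1 collector + 100 monitors
print(500 + int((1 * 50) ** 0.9) + int((100 * 10) ** 0.9))          # 1034 -> "1034m"

# 100 nodes with HDFS and YARN added: NAMENODE, RESOURCEMANAGER and the collector
# each count as a master-type sink; 100 DATANODEs and 100 monitors count as
# slave-type sinks (NODEMANAGER is not in the sink map)
print(500 + 3 * int((1 * 50) ** 0.9) + 2 * int((100 * 10) ** 0.9))  # 1601 -> "1601m"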