ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nc...@apache.org
Subject [24/50] [abbrv] ambari git commit: Revert "AMBARI-18713 use exclude list of mount device types on docker containers (dsen)"
Date Mon, 28 Nov 2016 19:23:18 GMT
Revert "AMBARI-18713 use exclude list of mount device types on docker containers (dsen)"


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/90967bcd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/90967bcd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/90967bcd

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 90967bcd0df7a5ab089611ee5502bb90d0084a0a
Parents: fbf636d
Author: Dmytro Sen <dsen@apache.org>
Authored: Wed Nov 23 18:47:20 2016 +0200
Committer: Dmytro Sen <dsen@apache.org>
Committed: Wed Nov 23 18:47:20 2016 +0200

----------------------------------------------------------------------
 .../src/main/resources/scripts/stack_advisor.py |  11 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |  10 -
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  50 +-
 .../stacks/HDP/2.1/services/stack_advisor.py    |  20 +-
 .../stacks/HDP/2.2/services/stack_advisor.py    |   7 -
 .../src/main/resources/stacks/stack_advisor.py  | 200 +-------
 .../stacks/2.0.6/common/test_stack_advisor.py   |  34 +-
 .../stacks/2.1/common/test_stack_advisor.py     |   5 -
 .../stacks/2.2/common/test_stack_advisor.py     |  20 +-
 .../test/python/stacks/test_stack_adviser.py    | 239 ----------
 ambari-web/app/mixins.js                        |   1 +
 .../app/utils/configs/config_initializer.js     |  28 +-
 .../mount_points_based_initializer_mixin.js     | 340 ++++++++++++++
 ambari-web/test/utils/ajax/ajax_test.js         |   9 +-
 .../utils/configs/config_initializer_test.js    | 458 +++++++++++++++++++
 15 files changed, 855 insertions(+), 577 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index abfab87..5926c39 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -70,11 +70,13 @@ def main(argv=None):
   if len(args) < 3:
     sys.stderr.write(USAGE)
     sys.exit(2)
+    pass
 
   action = args[0]
   if action not in ALL_ACTIONS:
     sys.stderr.write(USAGE)
     sys.exit(2)
+    pass
 
   hostsFile = args[1]
   servicesFile = args[2]
@@ -87,7 +89,6 @@ def main(argv=None):
   stackName = services["Versions"]["stack_name"]
   stackVersion = services["Versions"]["stack_version"]
   parentVersions = []
-
   if "stack_hierarchy" in services["Versions"]:
     parentVersions = services["Versions"]["stack_hierarchy"]["stack_versions"]
 
@@ -95,9 +96,8 @@ def main(argv=None):
 
   # Perform action
   actionDir = os.path.realpath(os.path.dirname(args[1]))
-
-  # filter
-  hosts = stackAdvisor.filterHostMounts(hosts, services)
+  result = {}
+  result_file = "non_valid_result_file.json"
 
   if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
     result = stackAdvisor.recommendComponentLayout(services, hosts)
@@ -111,11 +111,12 @@ def main(argv=None):
   elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
     result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
     result_file = os.path.join(actionDir, "configurations.json")
-  else:  # action == VALIDATE_CONFIGURATIONS
+  else: # action == VALIDATE_CONFIGURATIONS
     result = stackAdvisor.validateConfigurations(services, hosts)
     result_file = os.path.join(actionDir, "configurations-validation.json")
 
   dumpJson(result, result_file)
+  pass
 
 
 def instantiateStackAdvisor(stackName, stackVersion, parentVersions):

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index bf257a3..0d313cc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -274,14 +274,4 @@ gpgcheck=0</value>
     <description>For properties handled by handle_mounted_dirs this will make Ambari </description>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>agent_mounts_ignore_list</name>
-    <value/>
-    <description>Comma separated list of the mounts which would be ignored by Ambari during property values suggestion by Stack Adviser</description>
-    <on-ambari-upgrade add="true"/>
-    <value-attributes>
-      <visible>true</visible>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 9b5ff68..83014b7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -101,23 +101,9 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HBASE": self.recommendHbaseConfigurations,
       "STORM": self.recommendStormConfigurations,
       "AMBARI_METRICS": self.recommendAmsConfigurations,
-      "RANGER": self.recommendRangerConfigurations,
-      "ZOOKEEPER": self.recommendZookeeperConfigurations,
-      "OOZIE": self.recommendOozieConfigurations
+      "RANGER": self.recommendRangerConfigurations
     }
 
-  def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
-    oozie_mount_properties = [
-      ("oozie_data_dir", "OOZIE_SERVER", "/hadoop/oozie/data", "single"),
-    ]
-    self.updateMountProperties("oozie-env", oozie_mount_properties, configurations, services, hosts)
-
-  def recommendZookeeperConfigurations(self, configurations, clusterData, services, hosts):
-    zk_mount_properties = [
-      ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
-    ]
-    self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
-
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
     putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
@@ -130,15 +116,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
     putYarnEnvProperty('min_user_id', self.get_system_min_uid())
 
-    yarn_mount_properties = [
-      ("yarn.nodemanager.local-dirs", "NODEMANAGER", "/hadoop/yarn/local", "multi"),
-      ("yarn.nodemanager.log-dirs", "NODEMANAGER", "/hadoop/yarn/log", "multi"),
-      ("yarn.timeline-service.leveldb-timeline-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single"),
-      ("yarn.timeline-service.leveldb-state-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single")
-    ]
-
-    self.updateMountProperties("yarn-site", yarn_mount_properties, configurations, services, hosts)
-
     sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
     if sc_queue_name is not None:
       putYarnEnvProperty("service_check.queue.name", sc_queue_name)
@@ -169,13 +146,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
     putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
     putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
-
-    mapred_mounts = [
-      ("mapred.local.dir", ["TASKTRACKER", "NODEMANAGER"], "/hadoop/mapred", "multi")
-    ]
-
-    self.updateMountProperties("mapred-site", mapred_mounts, configurations, services, hosts)
-
     mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
     if mr_queue is not None:
       putMapredProperty("mapreduce.job.queuename", mr_queue)
@@ -423,18 +393,12 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       if len(namenodes.split(',')) > 1:
         putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
 
-    hdfs_mount_properties = [
-      ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
-      ("dfs.name.dir", "NAMENODE", "/hadoop/hdfs/namenode", "multi"),
-      ("dfs.namenode.name.dir", "DATANODE", "/hadoop/hdfs/namenode", "multi"),
-      ("dfs.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
-      ("fs.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single"),
-      ("dfs.namenode.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single")
-    ]
-
-    self.updateMountProperties("hdfs-site", hdfs_mount_properties, configurations, services, hosts)
-
-    dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+    #Initialize default 'dfs.datanode.data.dir' if needed
+    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
+      dataDirs = '/hadoop/hdfs/data'
+      putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
+    else:
+      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
 
     # dfs.datanode.du.reserved should be set to 10-15% of volume size
     # For each host selects maximum size of the volume. Then gets minimum for all hosts.

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 17225d0..9678dc1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -24,30 +24,12 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
     childRecommendConfDict = {
       "OOZIE": self.recommendOozieConfigurations,
       "HIVE": self.recommendHiveConfigurations,
-      "TEZ": self.recommendTezConfigurations,
-      "STORM": self.recommendStormConfigurations,
-      "FALCON": self.recommendFalconConfigurations
+      "TEZ": self.recommendTezConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
-  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
-    storm_mounts = [
-      ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
-    ]
-
-    self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
-
-  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
-    falcon_mounts = [
-      ("*.falcon.graph.storage.directory", "FALCON_SERVER", "/hadoop/falcon/data/lineage/graphdb", "single")
-    ]
-
-    self.updateMountProperties("falcon-startup.properties", falcon_mounts, configurations, services, hosts)
-
   def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
-    super(HDP21StackAdvisor, self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
-
     oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
     oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
     putOozieProperty = self.putProperty(configurations, "oozie-site", services)

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index feafc04..a8a75e5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -44,17 +44,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "LOGSEARCH" : self.recommendLogsearchConfigurations,
       "SPARK": self.recommendSparkConfigurations,
-      "KAFKA": self.recommendKafkaConfigurations,
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
-  def recommendKafkaConfigurations(self, configurations, clusterData, services, hosts):
-    kafka_mounts = [
-      ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
-    ]
-
-    self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
 
   def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 66d1387..f6191f8 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -972,18 +972,6 @@ class DefaultStackAdvisor(StackAdvisor):
       return None
     return siteConfig.get("properties")
 
-  def getServicesSiteProperties(self, services, siteName):
-    if not services:
-      return None
-
-    configurations = services.get("configurations")
-    if not configurations:
-      return None
-    siteConfig = configurations.get(siteName)
-    if siteConfig is None:
-      return None
-    return siteConfig.get("properties")
-
   def putProperty(self, config, configType, services=None):
     userConfigs = {}
     changedConfigs = []
@@ -1052,27 +1040,14 @@ class DefaultStackAdvisor(StackAdvisor):
       config[configType]["property_attributes"][key][attribute] = attributeValue if isinstance(attributeValue, list) else str(attributeValue)
     return appendPropertyAttribute
 
+
+  """
+  Returns the hosts which are running the given component.
+  """
   def getHosts(self, componentsList, componentName):
-    """
-    Returns the hosts which are running the given component.
-    """
     hostNamesList = [component["hostnames"] for component in componentsList if component["component_name"] == componentName]
     return hostNamesList[0] if len(hostNamesList) > 0 else []
 
-  def getMountPoints(self, hosts):
-    """
-    Return list of mounts available on the hosts
-
-    :type hosts dict
-    """
-    mount_points = []
-
-    for item in hosts["items"]:
-      if "disk_info" in item["Hosts"]:
-        mount_points.append(item["Hosts"]["disk_info"])
-
-    return mount_points
-
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.
@@ -1109,170 +1084,3 @@ class DefaultStackAdvisor(StackAdvisor):
 
   def getServiceNames(self, services):
     return [service["StackServices"]["service_name"] for service in services["services"]]
-
-  def filterHostMounts(self, hosts, services):
-    """
-    Filter mounts on the host using agent_mounts_ignore_list, by excluding and record with mount-point
-     mentioned in agent_mounts_ignore_list.
-
-    This function updates hosts dictionary
-
-    Example:
-
-      agent_mounts_ignore_list : "/run/secrets"
-
-      Hosts record :
-
-       "disk_info" : [
-          {
-              ...
-            "mountpoint" : "/"
-          },
-          {
-              ...
-            "mountpoint" : "/run/secrets"
-          }
-        ]
-
-      Result would be :
-
-        "disk_info" : [
-          {
-              ...
-            "mountpoint" : "/"
-          }
-        ]
-
-    :type hosts dict
-    :type services dict
-    """
-    if not services:
-      return hosts
-
-    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
-    ignore_list = []
-
-    if not cluster_env or "items" not in hosts:
-      return hosts
-
-    if "agent_mounts_ignore_list" in cluster_env and cluster_env["agent_mounts_ignore_list"].strip():
-      ignore_list = [x.strip() for x in cluster_env["agent_mounts_ignore_list"].strip().split(",")]
-
-    for host in hosts["items"]:
-      if "Hosts" not in host and "disk_info" not in host["Hosts"]:
-        continue
-
-      host = host["Hosts"]
-      host["disk_info"] = [disk for disk in host["disk_info"] if disk["mountpoint"] not in ignore_list]
-
-    return hosts
-
-  def __getSameHostMounts(self, hosts):
-    """
-    Return list of the mounts which are same and present on all hosts
-
-    :type hosts dict
-    :rtype list
-    """
-    if not hosts:
-      return None
-
-    hostMounts = self.getMountPoints(hosts)
-    mounts = []
-    for m in hostMounts:
-      host_mounts = set([item["mountpoint"] for item in m])
-      mounts = host_mounts if not mounts else mounts & host_mounts
-
-    return sorted(mounts)
-
-  def getMountPathVariations(self, initial_value, component_name, services, hosts):
-    """
-    Recommends best fitted mount by prefixing path with it.
-
-    :return return list of paths with properly selected paths. If no recommendation possible,
-     would be returned empty list
-
-    :type initial_value str
-    :type component_name str
-    :type services dict
-    :type hosts dict
-    :rtype list
-    """
-    available_mounts = []
-
-    if not initial_value:
-      return available_mounts
-
-    mounts = self.__getSameHostMounts(hosts)
-    sep = "/"
-
-    if not mounts:
-      return available_mounts
-
-    for mount in mounts:
-      new_mount = initial_value if mount == "/" else os.path.join(mount + sep, initial_value.lstrip(sep))
-      if new_mount not in available_mounts:
-        available_mounts.append(new_mount)
-
-    # no list transformations after filling the list, because this will cause item order change
-    return available_mounts
-
-  def getMountPathVariation(self, initial_value, component_name, services, hosts):
-    """
-    Recommends best fitted mount by prefixing path with it.
-
-    :return return list of paths with properly selected paths. If no recommendation possible,
-     would be returned empty list
-
-    :type initial_value str
-        :type component_name str
-    :type services dict
-    :type hosts dict
-    :rtype str
-    """
-    try:
-      return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
-    except IndexError:
-      return []
-
-  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts):
-    """
-    Update properties according to recommendations for available mount-points
-
-    propertyDefinitions is an array of set : property name, component name, initial value, recommendation type
-
-     Where,
-
-       property name - name of the property
-       component name, name of the component to which belongs this property
-       initial value - initial path
-       recommendation type - could be "multi" or "single". This describes recommendation strategy, to use only one disk
-        or use all available space on the host
-
-    :type propertyDefinitions list
-    :type siteConfig str
-    :type configurations dict
-    :type services dict
-    :type hosts dict
-    """
-
-    props = self.getServicesSiteProperties(services, siteConfig)
-    put_f = self.putProperty(configurations, siteConfig, services)
-
-    for prop_item in propertyDefinitions:
-      name, component, default_value, rc_type = prop_item
-      recommendation = None
-
-      if props is None or name not in props:
-        if rc_type == "multi":
-          recommendation = self.getMountPathVariations(default_value, component, services, hosts)
-        else:
-          recommendation = self.getMountPathVariation(default_value, component, services, hosts)
-      elif props and name in props and props[name] == default_value:
-        if rc_type == "multi":
-          recommendation = self.getMountPathVariations(default_value, component, services, hosts)
-        else:
-          recommendation = self.getMountPathVariation(default_value, component, services, hosts)
-
-      if recommendation:
-        put_f(name, ",".join(recommendation))

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 00ee563..9595b9e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1195,13 +1195,8 @@ class TestHDP206StackAdvisor(TestCase):
                   {'properties':
                      {'falcon_user': 'falcon'}},
                 'hdfs-site':
-                  {'properties':
-                     {'dfs.data.dir': '/hadoop/hdfs/data',
-                      'dfs.datanode.data.dir': '/hadoop/hdfs/data',
-                      'fs.checkpoint.dir': '/hadoop/hdfs/namesecondary',
-                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
-                      'dfs.name.dir': '/hadoop/hdfs/namenode',
-                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
+                  {'properties': 
+                     {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hive-env':
                   {'properties':
@@ -1334,12 +1329,7 @@ class TestHDP206StackAdvisor(TestCase):
                      {'falcon_user': 'falcon'}},
                 'hdfs-site':
                   {'properties':
-                     {'dfs.data.dir': '/hadoop/hdfs/data',
-                      'dfs.datanode.data.dir': '/hadoop/hdfs/data',
-                      'fs.checkpoint.dir': '/hadoop/hdfs/namesecondary',
-                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
-                      'dfs.name.dir': '/hadoop/hdfs/namenode',
-                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
+                     {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hive-env':
                   {'properties':
@@ -1473,13 +1463,8 @@ class TestHDP206StackAdvisor(TestCase):
                      {'hive_user': 'hive',
                       'webhcat_user': 'webhcat'}},
                 'hdfs-site':
-                  {'properties':
-                     {'dfs.data.dir': '/hadoop/hdfs/data',
-                      'dfs.datanode.data.dir': '/hadoop/hdfs/data',
-                      'fs.checkpoint.dir': '/hadoop/hdfs/namesecondary',
-                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
-                      'dfs.name.dir': '/hadoop/hdfs/namenode',
-                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
+                  {'properties': 
+                     {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hadoop-env':
                   {'properties':
@@ -1499,15 +1484,10 @@ class TestHDP206StackAdvisor(TestCase):
 
     expected["hdfs-site"] = {
       'properties': {
+        'dfs.datanode.data.dir': '/hadoop/hdfs/data',
         'dfs.datanode.du.reserved': '10240000000',
         'dfs.internal.nameservices': 'mycluster',
-        'dfs.ha.namenodes.mycluster': 'nn1,nn2',
-        'dfs.data.dir': '/hadoop/hdfs/data',
-        'dfs.datanode.data.dir': '/hadoop/hdfs/data',
-        'fs.checkpoint.dir': '/hadoop/hdfs/namesecondary',
-        'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
-        'dfs.name.dir': '/hadoop/hdfs/namenode',
-        'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
+        'dfs.ha.namenodes.mycluster': 'nn1,nn2'
       },
       'property_attributes': {
         'dfs.namenode.rpc-address': {

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index a9b36b1..7835262 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -486,12 +486,7 @@ class TestHDP21StackAdvisor(TestCase):
       },
       "hdfs-site": {
         "properties": {
-          'dfs.data.dir': '/hadoop/hdfs/data',
           'dfs.datanode.data.dir': '/hadoop/hdfs/data',
-          'fs.checkpoint.dir': '/hadoop/hdfs/namesecondary',
-          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
-          'dfs.name.dir': '/hadoop/hdfs/namenode',
-          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
           'dfs.datanode.du.reserved': '10240000000'
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 53c92ae..3cd05d3 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3068,12 +3068,7 @@ class TestHDP22StackAdvisor(TestCase):
           'dfs.namenode.safemode.threshold-pct': '1.000',
           'dfs.datanode.failed.volumes.tolerated': '1',
           'dfs.namenode.handler.count': '25',
-          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4',
-          'dfs.data.dir': '/hadoop/hdfs/data',
-          'fs.checkpoint.dir': '/hadoop/hdfs/namesecondary',
-          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
-          'dfs.name.dir': '/hadoop/hdfs/namenode',
-          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary'
+          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
         },
         'property_attributes': {
           'dfs.datanode.failed.volumes.tolerated': {'maximum': '4'},
@@ -3748,11 +3743,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "/yarn",
           "yarn.scheduler.maximum-allocation-mb": "39424",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
-          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
-          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
-          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           "yarn.scheduler.minimum-allocation-vcores": {
@@ -3798,6 +3789,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.linux-container-executor.group": "hadoop",
           "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
+          "yarn.nodemanager.linux-container-executor.group": "hadoop",
           "yarn.nodemanager.linux-container-executor.cgroups.mount": "true",
           "yarn.nodemanager.resource.memory-mb": "39424",
           "yarn.scheduler.minimum-allocation-mb": "3584",
@@ -3807,11 +3799,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "/yarn",
           "yarn.scheduler.maximum-allocation-mb": "39424",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
-          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
-          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
-          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           "yarn.nodemanager.linux-container-executor.cgroups.mount": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-server/src/test/python/stacks/test_stack_adviser.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/test_stack_adviser.py b/ambari-server/src/test/python/stacks/test_stack_adviser.py
deleted file mode 100644
index 2c171f1..0000000
--- a/ambari-server/src/test/python/stacks/test_stack_adviser.py
+++ /dev/null
@@ -1,239 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-from unittest import TestCase
-
-
-class TestBasicAdvisor(TestCase):
-  def setUp(self):
-    import imp
-    self.maxDiff = None
-    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
-    stackAdvisorPath = os.path.abspath(os.path.join(self.testDirectory, '../../../main/resources/stacks/stack_advisor.py'))
-
-    default_sa_classname = 'DefaultStackAdvisor'
-
-    with open(stackAdvisorPath, 'rb') as fp:
-      stack_advisor_impl = imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
-
-    clazz = getattr(stack_advisor_impl, default_sa_classname)
-    self.stackAdvisor = clazz()
-
-  def test_filterHostMounts(self):
-
-    filtered_mount = "/data"
-
-    hosts = {
-      "items": [
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": filtered_mount},
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          },
-        },
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm1"},
-              {"mountpoint": "/vagrant1"},
-              {"mountpoint": filtered_mount}
-            ],
-            "public_host_name": "c6402.ambari.apache.org",
-            "host_name": "c6402.ambari.apache.org"
-          },
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [
-      ],
-      "configurations": {
-        "cluster-env": {
-          "properties": {
-            "agent_mounts_ignore_list": filtered_mount
-          }
-        }
-      }
-    }
-
-    filtered_hosts = self.stackAdvisor.filterHostMounts(hosts, services)
-
-    for host in filtered_hosts["items"]:
-      self.assertEquals(False, filtered_mount in host["Hosts"]["disk_info"])
-
-  def test_getMountPathVariations(self):
-
-    filtered_mount = "/data"
-
-    hosts = {
-      "items": [
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": filtered_mount},
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          },
-        },
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm1"},
-              {"mountpoint": "/vagrant1"},
-              {"mountpoint": filtered_mount}
-            ],
-            "public_host_name": "c6402.ambari.apache.org",
-            "host_name": "c6402.ambari.apache.org"
-          },
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [
-      ],
-      "configurations": {
-        "cluster-env": {
-          "properties": {
-            "agent_mounts_ignore_list": filtered_mount
-          }
-        }
-      }
-    }
-
-    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
-    avail_mounts = self.stackAdvisor.getMountPathVariations("/test/folder", "DATANODE", services, hosts)
-
-    self.assertEquals(True, avail_mounts is not None)
-    self.assertEquals(1, len(avail_mounts))
-    self.assertEquals("/test/folder", avail_mounts[0])
-
-  def test_updateMountProperties(self):
-    hosts = {
-      "items": [
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/dev/shm"},
-              {"mountpoint": "/vagrant"},
-              {"mountpoint": "/data"},
-            ],
-            "public_host_name": "c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org"
-          },
-        },
-        {
-          "Hosts": {
-            "cpu_count": 4,
-            "total_mem": 50331648,
-            "disk_info": [
-              {"mountpoint": "/"},
-              {"mountpoint": "/me"},
-              {"mountpoint": "/dev/shm1"},
-              {"mountpoint": "/vagrant1"},
-              {"mountpoint": "/data"}
-            ],
-            "public_host_name": "c6402.ambari.apache.org",
-            "host_name": "c6402.ambari.apache.org"
-          },
-        }
-      ]
-    }
-
-    services = {
-      "Versions": {
-        "parent_stack_version": "2.5",
-        "stack_name": "HDP",
-        "stack_version": "2.6",
-        "stack_hierarchy": {
-          "stack_name": "HDP",
-          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
-        }
-      },
-      "services": [
-      ],
-      "configurations": {
-        "cluster-env": {
-          "properties": {
-            "agent_mounts_ignore_list": ""
-          }
-        },
-        "some-site": {
-          "path_prop": "/test"
-        }
-      }
-    }
-
-    pathProperties = [
-      ("path_prop", "DATANODE", "/test", "multi"),
-    ]
-
-    configurations = {}
-
-    self.stackAdvisor.updateMountProperties("some-site", pathProperties, configurations, services, hosts)
-
-    self.assertEquals("/test,/data/test", configurations["some-site"]["properties"]["path_prop"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-web/app/mixins.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins.js b/ambari-web/app/mixins.js
index 7b4d6b4..594ed74 100644
--- a/ambari-web/app/mixins.js
+++ b/ambari-web/app/mixins.js
@@ -69,3 +69,4 @@ require('mixins/common/widgets/widget_mixin');
 require('mixins/common/widgets/widget_section');
 require('mixins/unit_convert/base_unit_convert_mixin');
 require('mixins/unit_convert/convert_unit_widget_view_mixin');
+require('utils/configs/mount_points_based_initializer_mixin');

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-web/app/utils/configs/config_initializer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/config_initializer.js b/ambari-web/app/utils/configs/config_initializer.js
index de9ca7e..cb5b41f 100644
--- a/ambari-web/app/utils/configs/config_initializer.js
+++ b/ambari-web/app/utils/configs/config_initializer.js
@@ -20,6 +20,7 @@ var App = require('app');
 var stringUtils = require('utils/string_utils');
 
 require('utils/configs/config_initializer_class');
+require('utils/configs/mount_points_based_initializer_mixin');
 require('utils/configs/hosts_based_initializer_mixin');
 
 /**
@@ -52,7 +53,7 @@ function getZKBasedConfig() {
  *
  * @instance ConfigInitializer
  */
-App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitializerMixin, {
+App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedInitializerMixin, App.HostsBasedInitializerMixin, {
 
   initializers: function() {
     return {
@@ -110,7 +111,26 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitiali
       'templeton.zookeeper.hosts': getZKBasedConfig(),
       'hadoop.registry.zk.quorum': getZKBasedConfig(),
       'hive.cluster.delegation.token.store.zookeeper.connectString': getZKBasedConfig(),
-      'instance.zookeeper.host': getZKBasedConfig()
+      'instance.zookeeper.host': getZKBasedConfig(),
+
+      'dfs.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
+      'dfs.namenode.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
+      'dfs.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
+      'dfs.datanode.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
+      'yarn.nodemanager.local-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
+      'yarn.nodemanager.log-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
+      'mapred.local.dir': this.getMultipleMountPointsConfig(['TASKTRACKER', 'NODEMANAGER']),
+      'log.dirs': this.getMultipleMountPointsConfig('KAFKA_BROKER'),
+
+      'fs.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
+      'dfs.namenode.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
+      'yarn.timeline-service.leveldb-timeline-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
+      'yarn.timeline-service.leveldb-state-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
+      'dataDir': this.getSingleMountPointConfig('ZOOKEEPER_SERVER'),
+      'oozie_data_dir': this.getSingleMountPointConfig('OOZIE_SERVER'),
+      'storm.local.dir': this.getSingleMountPointConfig(['NODEMANAGER', 'NIMBUS']),
+      '*.falcon.graph.storage.directory': this.getSingleMountPointConfig('FALCON_SERVER'),
+      '*.falcon.graph.serialize.path': this.getSingleMountPointConfig('FALCON_SERVER')
     }
   }.property(''),
 
@@ -126,7 +146,9 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitiali
   },
 
   initializerTypes: [
-    {name: 'zookeeper_based', method: '_initAsZookeeperServersList'}
+    {name: 'zookeeper_based', method: '_initAsZookeeperServersList'},
+    {name: 'single_mountpoint', method: '_initAsSingleMountPoint'},
+    {name: 'multiple_mountpoints', method: '_initAsMultipleMountPoints'}
   ],
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js b/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
new file mode 100644
index 0000000..59a3985
--- /dev/null
+++ b/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
@@ -0,0 +1,340 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+/**
+ * Regexp used to determine if mount point is windows-like
+ *
+ * @type {RegExp}
+ */
+var winRegex = /^([a-z]):\\?$/;
+
+App.MountPointsBasedInitializerMixin = Em.Mixin.create({
+
+  /**
+   * Map for methods used as value-modifiers for configProperties with values as mount point(s)
+   * Used if mount point is win-like (@see winRegex)
+   * Key: id
+   * Value: method-name
+   *
+   * @type {{default: string, file: string, slashes: string}}
+   */
+  winReplacersMap: {
+    default: '_defaultWinReplace',
+    file: '_winReplaceWithFile',
+    slashes: '_defaultWinReplaceWithAdditionalSlashes'
+  },
+
+  /**
+   * Initializer for configs with value as one of the possible mount points
+   * Only hosts that contain one of the components from <code>initializer.components</code> are processed
+   * Hosts with Windows needs additional processing (@see winReplacersMap)
+   * Value example: '/', '/some/cool/dir'
+   *
+   * @param {configProperty} configProperty
+   * @param {topologyLocalDB} localDB
+   * @param {object} dependencies
+   * @param {object} initializer
+   * @return {Object}
+   */
+  _initAsSingleMountPoint: function (configProperty, localDB, dependencies, initializer) {
+    var hostsInfo = this._updateHostInfo(localDB.hosts);
+    var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
+    var winReplacersMap = this.get('winReplacersMap');
+    // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
+    if (!setOfHostNames.length) {
+      return configProperty;
+    }
+    var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
+
+    var mPoint = allMountPoints[0].mountpoint;
+    if (mPoint === "/") {
+      mPoint = Em.get(configProperty, 'recommendedValue');
+    }
+    else {
+      var mp = mPoint.toLowerCase();
+      if (winRegex.test(mp)) {
+        var methodName = winReplacersMap[initializer.winReplacer];
+        mPoint = this[methodName].call(this, configProperty, mp);
+      }
+      else {
+        mPoint = mPoint + Em.get(configProperty, 'recommendedValue');
+      }
+    }
+    Em.setProperties(configProperty, {
+      value: mPoint,
+      recommendedValue: mPoint
+    });
+
+    return configProperty;
+  },
+
+  /**
+   * Initializer for configs with value as all of the possible mount points
+   * Only hosts that contain one of the components from <code>initializer.components</code> are processed
+   * Hosts with Windows needs additional processing (@see winReplacersMap)
+   * Value example: '/\n/some/cool/dir' (`\n` - is divider)
+   *
+   * @param {Object} configProperty
+   * @param {topologyLocalDB} localDB
+   * @param {object} dependencies
+   * @param {object} initializer
+   * @return {Object}
+   */
+  _initAsMultipleMountPoints: function (configProperty, localDB, dependencies, initializer) {
+    var hostsInfo = this._updateHostInfo(localDB.hosts);
+    var self = this;
+    var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
+    var winReplacersMap = this.get('winReplacersMap');
+    // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
+    if (!setOfHostNames.length) {
+      return configProperty;
+    }
+
+    var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
+    var mPoint = '';
+
+    allMountPoints.forEach(function (eachDrive) {
+      if (eachDrive.mountpoint === '/') {
+        mPoint += Em.get(configProperty, 'recommendedValue') + "\n";
+      }
+      else {
+        var mp = eachDrive.mountpoint.toLowerCase();
+        if (winRegex.test(mp)) {
+          var methodName = winReplacersMap[initializer.winReplacer];
+          mPoint += self[methodName].call(this, configProperty, mp);
+        }
+        else {
+          mPoint += eachDrive.mountpoint + Em.get(configProperty, 'recommendedValue') + "\n";
+        }
+      }
+    }, this);
+
+    Em.setProperties(configProperty, {
+      value: mPoint,
+      recommendedValue: mPoint
+    });
+
+    return configProperty;
+  },
+
+  /**
+   * Replace drive-based windows-path with 'file:///'
+   *
+   * @param {configProperty} configProperty
+   * @param {string} mountPoint
+   * @returns {string}
+   * @private
+   */
+  _winReplaceWithFile: function (configProperty, mountPoint) {
+    var winDriveUrl = mountPoint.toLowerCase().replace(winRegex, 'file:///$1:');
+    return winDriveUrl + Em.get(configProperty, 'recommendedValue') + '\n';
+  },
+
+  /**
+   * Replace drive-based windows-path
+   *
+   * @param {configProperty} configProperty
+   * @param {string} mountPoint
+   * @returns {string}
+   * @private
+   */
+  _defaultWinReplace: function (configProperty, mountPoint) {
+    var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
+    var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\');
+    return winDrive + winDir + '\n';
+  },
+
+  /**
+   * Same to <code>_defaultWinReplace</code>, but with extra-slash in the end
+   *
+   * @param {configProperty} configProperty
+   * @param {string} mountPoint
+   * @returns {string}
+   * @private
+   */
+  _defaultWinReplaceWithAdditionalSlashes: function (configProperty, mountPoint) {
+    var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
+    var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\\\');
+    return winDrive + winDir + '\n';
+  },
+
+  /**
+   * Update information from localDB using <code>App.Host</code>-model
+   *
+   * @param {object} hostsInfo
+   * @returns {object}
+   * @private
+   */
+  _updateHostInfo: function (hostsInfo) {
+    App.Host.find().forEach(function (item) {
+      if (!hostsInfo[item.get('id')]) {
+        hostsInfo[item.get('id')] = {
+          name: item.get('id'),
+          cpu: item.get('cpu'),
+          memory: item.get('memory'),
+          disk_info: item.get('diskInfo'),
+          bootStatus: "REGISTERED",
+          isInstalled: true
+        };
+      }
+    });
+    return hostsInfo;
+  },
+
+  /**
+   * Determines if mount point is valid
+   * Criteria:
+   * <ul>
+   *   <li>Should have available space</li>
+   *   <li>Should not be home-dir</li>
+   *   <li>Should not be docker-dir</li>
+   *   <li>Should not be boot-dir</li>
+   *   <li>Should not be dev-dir</li>
+   *   <li>A valid mount point starting with /usr/hdp/ should be /usr/hdp/current
+   *       or /usr/hdp/<STACK_VERSION_NUMBER> e.g. /usr/hdp/2.5.0.0
+   *   </li>
+   * </ul>
+   *
+   * @param {topologyLocalDB} localDB
+   * @returns {function} predicate: returns true if a mount point is valid, false otherwise
+   * @private
+   */
+  _filterMountPoint: function (localDB) {
+    var stackVersionNumber = [Em.getWithDefault(localDB.selectedStack || {}, 'repository_version', null)].compact();
+    return function(mPoint) {
+      var isAvailable = mPoint.available !== 0;
+      if (!isAvailable) {
+        return false;
+      }
+
+      var stackRoot = '/usr/hdp';
+      var notHome = !['/', '/home'].contains(mPoint.mountpoint);
+      var notDocker = !['/etc/resolv.conf', '/etc/hostname', '/etc/hosts'].contains(mPoint.mountpoint);
+      var notBoot = mPoint.mountpoint && !(mPoint.mountpoint.startsWith('/boot')
+                                           || mPoint.mountpoint.startsWith('/mnt')
+                                           || mPoint.mountpoint.startsWith('/tmp'));
+      var notDev = !(['devtmpfs', 'tmpfs', 'vboxsf', 'CDFS'].contains(mPoint.type));
+      var validStackRootMount = !(mPoint.mountpoint.startsWith(stackRoot) && !['current'].concat(stackVersionNumber).filter(function(i) {
+        return mPoint.mountpoint === stackRoot + '/' + i;
+      }).length);
+
+      return notHome && notDocker && notBoot && notDev && validStackRootMount;
+    };
+  },
+
+  /**
+   * Get list of hostNames from localDB which contains needed components
+   *
+   * @param {topologyLocalDB} localDB
+   * @param {object} initializer
+   * @returns {string[]}
+   * @private
+   */
+  _getSetOfHostNames: function (localDB, initializer) {
+    var masterComponentHostsInDB = Em.getWithDefault(localDB, 'masterComponentHosts', []);
+    var slaveComponentHostsInDB = Em.getWithDefault(localDB, 'slaveComponentHosts', []);
+    var hosts = masterComponentHostsInDB.filter(function (master) {
+      return initializer.components.contains(master.component);
+    }).mapProperty('hostName');
+
+    var sHosts = slaveComponentHostsInDB.find(function (slave) {
+      return initializer.components.contains(slave.componentName);
+    });
+    if (sHosts) {
+      hosts = hosts.concat(sHosts.hosts.mapProperty('hostName'));
+    }
+    return hosts;
+  },
+
+  /**
+   * Get list of all unique valid mount points for hosts
+   *
+   * @param {string[]} setOfHostNames
+   * @param {object} hostsInfo
+   * @param {topologyLocalDB} localDB
+   * @returns {string[]}
+   * @private
+   */
+  _getAllMountPoints: function (setOfHostNames, hostsInfo, localDB) {
+    var allMountPoints = [],
+        mountPointFilter = this._filterMountPoint(localDB);
+    for (var i = 0; i < setOfHostNames.length; i++) {
+      var hostname = setOfHostNames[i];
+      var mountPointsPerHost = hostsInfo[hostname].disk_info;
+      var mountPointAsRoot = mountPointsPerHost.findProperty('mountpoint', '/');
+
+      // If Server does not send any host details information then at least one mountpoint should be presumed as root
+      // This happens in a single container Linux Docker environment.
+      if (!mountPointAsRoot) {
+        mountPointAsRoot = {
+          mountpoint: '/'
+        };
+      }
+
+      mountPointsPerHost.filter(mountPointFilter).forEach(function (mPoint) {
+        if( !allMountPoints.findProperty("mountpoint", mPoint.mountpoint)) {
+          allMountPoints.push(mPoint);
+        }
+      }, this);
+    }
+
+    if (!allMountPoints.length) {
+      allMountPoints.push(mountPointAsRoot);
+    }
+    return allMountPoints;
+  },
+
+  /**
+   * Settings for <code>single_mountpoint</code>-initializer
+   * Used for configs with value as one of the possible mount points
+   *
+   * @see _initAsSingleMountPoint
+   * @param {string|string[]} components
+   * @param {string} winReplacer
+   * @returns {{components: string[], winReplacer: string, type: string}}
+   */
+  getSingleMountPointConfig: function (components, winReplacer) {
+    winReplacer = winReplacer || 'default';
+    return {
+      components: Em.makeArray(components),
+      winReplacer: winReplacer,
+      type: 'single_mountpoint'
+    };
+  },
+
+  /**
+   * Settings for <code>multiple_mountpoints</code>-initializer
+   * Used for configs with value as all of the possible mount points
+   *
+   * @see _initAsMultipleMountPoints
+   * @param {string|string[]} components
+   * @param {string} winReplacer
+   * @returns {{components: string[], winReplacer: string, type: string}}
+   */
+  getMultipleMountPointsConfig: function (components, winReplacer) {
+    winReplacer = winReplacer || 'default';
+    return {
+      components: Em.makeArray(components),
+      winReplacer: winReplacer,
+      type: 'multiple_mountpoints'
+    };
+  }
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-web/test/utils/ajax/ajax_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/ajax/ajax_test.js b/ambari-web/test/utils/ajax/ajax_test.js
index 3556e1e..d252c3d 100644
--- a/ambari-web/test/utils/ajax/ajax_test.js
+++ b/ambari-web/test/utils/ajax/ajax_test.js
@@ -29,16 +29,11 @@ describe('App.ajax', function() {
 
   beforeEach(function() {
     App.ajax.send.restore();
-    sinon.stub(App.logger, 'setTimer');
     sinon.spy(App.ajax, 'send'); // no sense to test stubbed function, so going to spy on it
     App.set('apiPrefix', '/api/v1');
     App.set('clusterName', 'tdk');
   });
 
-  afterEach(function() {
-    App.logger.setTimer.restore();
-  });
-
   describe('#send', function() {
     it('Without sender', function() {
       expect(App.ajax.send({})).to.equal(null);
@@ -173,7 +168,7 @@ describe('App.ajax', function() {
       });
     });
   });
-
+  
   describe('#abortRequests', function () {
 
     var xhr = {
@@ -203,6 +198,6 @@ describe('App.ajax', function() {
     it('should clear requests array', function () {
       expect(requests).to.have.length(0);
     });
-
+    
   });
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/90967bcd/ambari-web/test/utils/configs/config_initializer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/configs/config_initializer_test.js b/ambari-web/test/utils/configs/config_initializer_test.js
index ceed065..19ba03d 100644
--- a/ambari-web/test/utils/configs/config_initializer_test.js
+++ b/ambari-web/test/utils/configs/config_initializer_test.js
@@ -905,6 +905,312 @@ describe('App.ConfigInitializer', function () {
 
   });
 
+  describe('config with mount points', function () {
+
+    var localDB = {
+        masterComponentHosts: [
+          {
+            component: 'NAMENODE',
+            hostName: 'h0'
+          },
+          {
+            component: 'SECONDARY_NAMENODE',
+            hostName: 'h4'
+          },
+          {
+            component: 'APP_TIMELINE_SERVER',
+            hostName: 'h0'
+          },
+          {
+            component: 'ZOOKEEPER_SERVER',
+            hostName: 'h0'
+          },
+          {
+            component: 'ZOOKEEPER_SERVER',
+            hostName: 'h1'
+          },
+          {
+            component: 'OOZIE_SERVER',
+            hostName: 'h0'
+          },
+          {
+            component: 'OOZIE_SERVER',
+            hostName: 'h1'
+          },
+          {
+            component: 'NIMBUS',
+            hostName: 'h2'
+          },
+          {
+            component: 'FALCON_SERVER',
+            hostName: 'h3'
+          },
+          {
+            component: 'KAFKA_BROKER',
+            hostName: 'h0'
+          },
+          {
+            component: 'KAFKA_BROKER',
+            hostName: 'h1'
+          }
+        ],
+        slaveComponentHosts: [
+          {
+            componentName: 'DATANODE',
+            hosts: [
+              {
+                hostName: 'h0'
+              },
+              {
+                hostName: 'h1'
+              }
+            ]
+          },
+          {
+            componentName: 'TASKTRACKER',
+            hosts: [
+              {
+                hostName: 'h0'
+              },
+              {
+                hostName: 'h1'
+              }
+            ]
+          },
+          {
+            componentName: 'NODEMANAGER',
+            hosts: [
+              {
+                hostName: 'h0'
+              },
+              {
+                hostName: 'h1'
+              },
+              {
+                hostName: 'h4'
+              }
+            ]
+          },
+          {
+            componentName: 'HBASE_REGIONSERVER',
+            hosts: [
+              {
+                hostName: 'h0'
+              },
+              {
+                hostName: 'h1'
+              }
+            ]
+          },
+          {
+            componentName: 'SUPERVISOR',
+            hosts: [
+              {
+                hostName: 'h0'
+              },
+              {
+                hostName: 'h1'
+              }
+            ]
+          }
+        ],
+        hosts: {
+          h0: {
+            disk_info: [
+              {
+                mountpoint: '/'
+              },
+              {
+                mountpoint: '/home'
+              },
+              {
+                mountpoint: '/boot'
+              },
+              {
+                mountpoint: '/boot/efi'
+              },
+              {
+                mountpoint: '/mnt'
+              },
+              {
+                mountpoint: '/mnt/efi'
+              },
+              {
+                mountpoint: '/media/disk0',
+                available: '100000000'
+              },
+              {
+                mountpoint: '/mount0',
+                available: '100000000'
+              }
+            ]
+          },
+          h4: {
+            disk_info: [
+              {
+                mountpoint: 'c:',
+                available: '100000000'
+              }
+            ]
+          }
+        }
+      },
+      cases = [
+        {
+          name: 'dfs.namenode.name.dir',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n'
+        },
+        {
+          name: 'dfs.name.dir',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n'
+        },
+        {
+          name: 'fs.checkpoint.dir',
+          isOnlyFirstOneNeeded: true,
+          value: 'file:///c:/default\n'
+        },
+        {
+          name: 'dfs.namenode.checkpoint.dir',
+          isOnlyFirstOneNeeded: true,
+          value: 'file:///c:/default\n'
+        },
+        {
+          name: 'dfs.data.dir',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
+        },
+        {
+          name: 'dfs.datanode.data.dir',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
+        },
+        {
+          name: 'mapred.local.dir',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
+        },
+        {
+          name: 'yarn.nodemanager.log-dirs',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
+        },
+        {
+          name: 'yarn.nodemanager.local-dirs',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
+        },
+        {
+          name: 'yarn.timeline-service.leveldb-timeline-store.path',
+          isOnlyFirstOneNeeded: true,
+          value: '/media/disk0/default'
+        },
+        {
+          name: 'yarn.timeline-service.leveldb-state-store.path',
+          isOnlyFirstOneNeeded: true,
+          value: '/media/disk0/default'
+        },
+        {
+          name: 'dataDir',
+          isOnlyFirstOneNeeded: true,
+          value: '/media/disk0/default'
+        },
+        {
+          name: 'oozie_data_dir',
+          isOnlyFirstOneNeeded: true,
+          value: '/media/disk0/default'
+        },
+        {
+          name: 'storm.local.dir',
+          isOnlyFirstOneNeeded: true,
+          value: '/media/disk0/default'
+        },
+        {
+          name: '*.falcon.graph.storage.directory',
+          isOnlyFirstOneNeeded: true,
+          value: '/default'
+        },
+        {
+          name: '*.falcon.graph.serialize.path',
+          isOnlyFirstOneNeeded: true,
+          value: '/default'
+        },
+        {
+          name: 'log.dirs',
+          isOnlyFirstOneNeeded: false,
+          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
+        }
+      ];
+
+    beforeEach(function () {
+      sinon.stub(App.Host, 'find').returns([
+        Em.Object.create({
+          id: 'h1',
+          diskInfo: [
+            {
+              mountpoint: '/media/disk1',
+              type: 'devtmpfs'
+            },
+            {
+              mountpoint: '/media/disk1',
+              type: 'tmpfs'
+            },
+            {
+              mountpoint: '/media/disk1',
+              type: 'vboxsf'
+            },
+            {
+              mountpoint: '/media/disk1',
+              type: 'CDFS'
+            },
+            {
+              mountpoint: '/media/disk1',
+              available: '0'
+            },
+            {
+              mountpoint: '/media/disk1',
+              available: '100000000'
+            },
+            {
+              mountpoint: '/mount1',
+              available: '100000000'
+            }
+          ]
+        }),
+        Em.Object.create({
+          id: 'h2',
+          diskInfo: [
+            {
+              mountpoint: '/'
+            }
+          ]
+        }),
+        Em.Object.create({
+          id: 'h3',
+          diskInfo: []
+        })
+      ]);
+    });
+
+    afterEach(function () {
+      App.Host.find.restore();
+    });
+
+    cases.forEach(function (item) {
+      it(item.name, function () {
+        serviceConfigProperty.setProperties({
+          name: item.name,
+          recommendedValue: '/default'
+        });
+        App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, {});
+        expect(serviceConfigProperty.get('value')).to.equal(item.value);
+        expect(serviceConfigProperty.get('recommendedValue')).to.equal(item.value);
+      });
+    });
+
+  });
+
   describe('initializerTypes', function () {
     var types = App.ConfigInitializer.get('initializerTypes');
     Em.keys(types).forEach(function(type) {
@@ -950,4 +1256,156 @@ describe('App.ConfigInitializer', function () {
     });
 
   });
+
+  describe('#_filterMountPoint', function() {
+    [
+      {
+        mPoint: {
+          mountpoint: '/'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/home'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/etc/resolv.conf'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/etc/hostname'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/etc/hosts'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/boot'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/mnt'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/tmp'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/some-dir',
+          type: 'devtmpfs'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/some-dir',
+          type: 'tmpfs'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/some-dir',
+          type: 'vboxsf'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/some-dir',
+          type: 'CDFS'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/usr/hdp'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/usr/hdp/1'
+        },
+        localDB: {},
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/usr/hdp/current'
+        },
+        localDB: {},
+        e: true
+      },
+      {
+        mPoint: {
+          mountpoint: '/usr/hdp/2.5'
+        },
+        localDB: {
+          selectedStack: {
+            repository_version: '2.5'
+          }
+        },
+        e: true
+      },
+      {
+        mPoint: {
+          mountpoint: '/usr/hdp/2.5.0'
+        },
+        localDB: {
+          selectedStack: {
+            repository_version: '2.5'
+          }
+        },
+        e: false
+      },
+      {
+        mPoint: {
+          mountpoint: '/normal/directory'
+        },
+        localDB: {
+          selectedStack: {
+            repository_version: '2.5'
+          }
+        },
+        e: true
+      }
+    ].forEach(function(test) {
+      it('mount point "{0}" should be {1}'.format(test.mPoint.mountpoint, test.e ? 'valid' : 'invalid'), function() {
+        var fFn = App.ConfigInitializer._filterMountPoint(test.localDB);
+        expect(fFn(test.mPoint)).to.be.equal(test.e);
+      });
+    });
+  });
 });


Mime
View raw message