ambari-commits mailing list archives

From rle...@apache.org
Subject ambari git commit: AMBARI-13487. Basic StackAdvisor support for blueprint (configurations) (Oliver Szabo via rlevas)
Date Thu, 29 Oct 2015 23:54:05 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk c972889b4 -> c8e138d98


AMBARI-13487. Basic StackAdvisor support for blueprint (configurations) (Oliver Szabo via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c8e138d9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c8e138d9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c8e138d9

Branch: refs/heads/trunk
Commit: c8e138d98fcfbd25a7d8c46726c956fa50f30fac
Parents: c972889
Author: Oliver Szabo <oszabo@hortonworks.com>
Authored: Thu Oct 29 19:53:50 2015 -0400
Committer: Robert Levas <rlevas@hortonworks.com>
Committed: Thu Oct 29 19:53:59 2015 -0400

----------------------------------------------------------------------
 .../StackAdvisorBlueprintProcessor.java         | 171 ++++++++++++++
 .../ambari/server/controller/AmbariServer.java  |   3 +
 .../BlueprintConfigurationProcessor.java        |  97 ++++++++
 .../internal/ClusterResourceProvider.java       |   1 +
 .../internal/ProvisionClusterRequest.java       |  43 +++-
 .../server/topology/AdvisedConfiguration.java   |  41 ++++
 .../topology/ClusterConfigurationRequest.java   |  11 +-
 .../ambari/server/topology/ClusterTopology.java |   6 +
 .../server/topology/ClusterTopologyImpl.java    |  17 ++
 .../topology/ConfigRecommendationStrategy.java  |  37 +++
 .../ambari/server/topology/TopologyManager.java |  19 +-
 .../StackAdvisorBlueprintProcessorTest.java     | 200 ++++++++++++++++
 .../BlueprintConfigurationProcessorTest.java    | 233 +++++++++++++++++--
 .../server/topology/TopologyManagerTest.java    |   1 +
 14 files changed, 853 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
new file mode 100644
index 0000000..0325885
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services.stackadvisor;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorRequest.StackAdvisorRequestType;
+import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
+import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse.BlueprintConfigurations;
+import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.topology.AdvisedConfiguration;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.HostGroup;
+import org.apache.ambari.server.topology.HostGroupInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Generate advised configurations for blueprint cluster provisioning by the stack advisor.
+ */
+@Singleton
+public class StackAdvisorBlueprintProcessor {
+
+  private static final Logger LOG = LoggerFactory.getLogger(StackAdvisorBlueprintProcessor.class);
+
+  private static StackAdvisorHelper stackAdvisorHelper;
+
+  static final String RECOMMENDATION_FAILED = "Configuration recommendation failed.";
+  static final String INVALID_RESPONSE = "Configuration recommendation returned with invalid response.";
+
+  public static void init(StackAdvisorHelper instance) {
+    stackAdvisorHelper = instance;
+  }
+
+  /**
+   * Obtain configuration recommendations from the stack advisor, then store the results in the cluster topology.
+   * @param clusterTopology cluster topology instance
+   */
+  public void adviseConfiguration(ClusterTopology clusterTopology) throws ConfigurationTopologyException {
+    StackAdvisorRequest request = createStackAdvisorRequest(clusterTopology, StackAdvisorRequestType.CONFIGURATIONS);
+    try {
+      RecommendationResponse response = stackAdvisorHelper.recommend(request);
+      addAdvisedConfigurationsToTopology(response, clusterTopology);
+    } catch (StackAdvisorException e) {
+      throw new ConfigurationTopologyException(RECOMMENDATION_FAILED, e);
+    } catch (IllegalArgumentException e) {
+      throw new ConfigurationTopologyException(INVALID_RESPONSE, e);
+    }
+  }
+
+  private StackAdvisorRequest createStackAdvisorRequest(ClusterTopology clusterTopology, StackAdvisorRequestType requestType) {
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    Map<String, Set<String>> hgComponentsMap = gatherHostGroupComponents(clusterTopology);
+    Map<String, Set<String>> hgHostsMap = gatherHostGroupBindings(clusterTopology);
+    Map<String, Set<String>> componentHostsMap = gatherComponentsHostsMap(hgComponentsMap,
+      hgHostsMap);
+    return StackAdvisorRequest.StackAdvisorRequestBuilder
+      .forStack(stack.getName(), stack.getVersion())
+      .forServices(new ArrayList<String>(clusterTopology.getBlueprint().getServices()))
+      .forHosts(gatherHosts(clusterTopology))
+      .forHostsGroupBindings(gatherHostGroupBindings(clusterTopology))
+      .forHostComponents(gatherHostGroupComponents(clusterTopology))
+      .withComponentHostsMap(componentHostsMap)
+      .withConfigurations(calculateConfigs(clusterTopology))
+      .ofType(requestType)
+      .build();
+  }
+
+  private Map<String, Set<String>> gatherHostGroupBindings(ClusterTopology clusterTopology) {
+    Map<String, Set<String>> hgBindings = Maps.newHashMap();
+    for (Map.Entry<String, HostGroupInfo> hgEntry : clusterTopology.getHostGroupInfo().entrySet()) {
+      hgBindings.put(hgEntry.getKey(), Sets.newCopyOnWriteArraySet(hgEntry.getValue().getHostNames()));
+    }
+    return hgBindings;
+  }
+
+  private Map<String, Set<String>> gatherHostGroupComponents(ClusterTopology clusterTopology) {
+    Map<String, Set<String>> hgComponentsMap = Maps.newHashMap();
+    for (Map.Entry<String, HostGroup> hgEntry : clusterTopology.getBlueprint().getHostGroups().entrySet()) {
+      hgComponentsMap.put(hgEntry.getKey(), Sets.newCopyOnWriteArraySet(hgEntry.getValue().getComponents()));
+    }
+    return hgComponentsMap;
+  }
+
+  private Map<String, Map<String, Map<String, String>>> calculateConfigs(ClusterTopology clusterTopology) {
+    Map<String, Map<String, Map<String, String>>> result = Maps.newHashMap();
+    Map<String, Map<String, String>> fullProperties = clusterTopology.getConfiguration().getFullProperties();
+    for (Map.Entry<String, Map<String, String>> siteEntry : fullProperties.entrySet()) {
+      Map<String, Map<String, String>> propsMap = Maps.newHashMap();
+      propsMap.put("properties", siteEntry.getValue());
+      result.put(siteEntry.getKey(), propsMap);
+    }
+    return result;
+  }
+
+  private Map<String, Set<String>> gatherComponentsHostsMap(Map<String, Set<String>> hostGroups, Map<String, Set<String>> bindingHostGroups) {
+    Map<String, Set<String>> componentHostsMap = new HashMap<String, Set<String>>();
+    if (null != bindingHostGroups && null != hostGroups) {
+      for (Map.Entry<String, Set<String>> hgComponents : hostGroups.entrySet()) {
+        String hgName = hgComponents.getKey();
+        Set<String> components = hgComponents.getValue();
+
+        Set<String> hosts = bindingHostGroups.get(hgName);
+        for (String component : components) {
+          Set<String> componentHosts = componentHostsMap.get(component);
+          if (componentHosts == null) { // not yet initialized for this component
+            componentHosts = new HashSet<String>();
+            componentHostsMap.put(component, componentHosts);
+          }
+          componentHosts.addAll(hosts);
+        }
+      }
+    }
+    return componentHostsMap;
+  }
+
+  private List<String> gatherHosts(ClusterTopology clusterTopology) {
+    List<String> hosts = Lists.newArrayList();
+    for (Map.Entry<String, HostGroupInfo> entry : clusterTopology.getHostGroupInfo().entrySet()) {
+      hosts.addAll(entry.getValue().getHostNames());
+    }
+    return hosts;
+  }
+
+  private void addAdvisedConfigurationsToTopology(RecommendationResponse response,
+                                                  ClusterTopology topology) {
+    Preconditions.checkArgument(response.getRecommendations() != null,
+      "Recommendation response is empty.");
+    Preconditions.checkArgument(response.getRecommendations().getBlueprint() != null,
+      "Blueprint field is missing from the recommendation response.");
+    Preconditions.checkArgument(response.getRecommendations().getBlueprint().getConfigurations() != null,
+      "Configurations are missing from the recommendation blueprint response.");
+
+    Map<String, BlueprintConfigurations> recommendedConfigurations =
+      response.getRecommendations().getBlueprint().getConfigurations();
+    for (Map.Entry<String, BlueprintConfigurations> configEntry : recommendedConfigurations.entrySet()) {
+      String configType = configEntry.getKey();
+      BlueprintConfigurations blueprintConfig = configEntry.getValue();
+      topology.getAdvisedConfigurations().put(configType, new AdvisedConfiguration(
+        blueprintConfig.getProperties(), blueprintConfig.getPropertyAttributes()));
+    }
+  }
+}

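For illustration only, here is a minimal standalone sketch (not part of this commit; the class name and sample hosts/components are made up) of the transform performed by gatherComponentsHostsMap above: the per-host-group component assignments and host bindings are flattened into the component-to-hosts map that the stack advisor request builder expects.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ComponentHostsMapSketch {
  public static void main(String[] args) {
    // components per host group, as the blueprint defines them
    Map<String, Set<String>> hgComponents = new HashMap<String, Set<String>>();
    hgComponents.put("host_group_1", new HashSet<String>(Arrays.asList("NAMENODE", "RESOURCEMANAGER")));
    hgComponents.put("host_group_2", new HashSet<String>(Arrays.asList("DATANODE", "NODEMANAGER")));

    // hosts bound to each host group, as the cluster creation template defines them
    Map<String, Set<String>> hgBindings = new HashMap<String, Set<String>>();
    hgBindings.put("host_group_1", new HashSet<String>(Arrays.asList("master.example.com")));
    hgBindings.put("host_group_2", new HashSet<String>(Arrays.asList("worker1.example.com", "worker2.example.com")));

    // flatten into component -> hosts, mirroring gatherComponentsHostsMap
    Map<String, Set<String>> componentHostsMap = new HashMap<String, Set<String>>();
    for (Map.Entry<String, Set<String>> hgEntry : hgComponents.entrySet()) {
      Set<String> hosts = hgBindings.get(hgEntry.getKey());
      for (String component : hgEntry.getValue()) {
        Set<String> componentHosts = componentHostsMap.get(component);
        if (componentHosts == null) {
          componentHosts = new HashSet<String>();
          componentHostsMap.put(component, componentHosts);
        }
        componentHosts.addAll(hosts);
      }
    }
    // e.g. {NAMENODE=[master.example.com], DATANODE=[worker1.example.com, worker2.example.com], ...}
    System.out.println(componentHostsMap);
  }
}
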
http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 9044e37..625ebc6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -45,6 +45,7 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.KeyService;
 import org.apache.ambari.server.api.services.PersistKeyValueImpl;
 import org.apache.ambari.server.api.services.PersistKeyValueService;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorBlueprintProcessor;
 import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorHelper;
 import org.apache.ambari.server.bootstrap.BootStrapImpl;
 import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
@@ -704,6 +705,8 @@ public class AmbariServer {
     ClusterPrivilegeResourceProvider.init(injector.getInstance(ClusterDAO.class));
     AmbariPrivilegeResourceProvider.init(injector.getInstance(ClusterDAO.class));
     ActionManager.setTopologyManager(injector.getInstance(TopologyManager.class));
+    TopologyManager.init(injector.getInstance(StackAdvisorBlueprintProcessor.class));
+    StackAdvisorBlueprintProcessor.init(injector.getInstance(StackAdvisorHelper.class));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index d3d95bd..c736cc5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -19,9 +19,15 @@
 package org.apache.ambari.server.controller.internal;
 
 
+import com.google.common.base.Predicates;
+import com.google.common.collect.Maps;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.AdvisedConfiguration;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.Cardinality;
 import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.commons.lang.StringUtils;
@@ -206,6 +212,8 @@ public class BlueprintConfigurationProcessor {
     Configuration clusterConfig = clusterTopology.getConfiguration();
     Map<String, HostGroupInfo> groupInfoMap = clusterTopology.getHostGroupInfo();
 
+    doRecommendConfigurations(clusterConfig, configTypesUpdated);
+
     // filter out any properties that should not be included, based on the dependencies
     // specified in the stacks, and the filters defined in this class
     doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
@@ -383,6 +391,95 @@ public class BlueprintConfigurationProcessor {
   }
 
   /**
+   * Update configuration properties from the stack advisor's recommended configurations, based on
+   * the {@link ConfigRecommendationStrategy}.
+   * @param configuration configuration being processed
+   * @param configTypesUpdated updated config types
+   */
+  private void doRecommendConfigurations(Configuration configuration, Set<String> configTypesUpdated) {
+    ConfigRecommendationStrategy configRecommendationStrategy = clusterTopology.getConfigRecommendationStrategy();
+    Map<String, AdvisedConfiguration> advisedConfigurations = clusterTopology.getAdvisedConfigurations();
+    if (ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY.equals(configRecommendationStrategy)) {
+      LOG.debug("Filter out recommended configurations. Keep only the stack defaults.");
+      doFilterStackDefaults(configuration, advisedConfigurations);
+    }
+    if (!ConfigRecommendationStrategy.NEVER_APPLY.equals(configRecommendationStrategy)) {
+      for (Map.Entry<String, AdvisedConfiguration> advConfEntry : advisedConfigurations.entrySet()) {
+        String configType = advConfEntry.getKey();
+        AdvisedConfiguration advisedConfig = advConfEntry.getValue();
+        LOG.debug("Update '{}' configurations with recommended configurations provided by the stack advisor.", configType);
+        doReplaceProperties(configuration, configType, advisedConfig, configTypesUpdated);
+        doRemovePropertiesIfNeeded(configuration, configType, advisedConfig, configTypesUpdated);
+      }
+    } else {
+      LOG.debug("No any recommended configuration applied. (strategy: {})", ConfigRecommendationStrategy.NEVER_APPLY);
+    }
+  }
+
+  /**
+   * Drop every property from the advised configurations that is not found among the stack defaults.
+   * @param configuration configuration being processed
+   * @param advisedConfigurations advised configuration instance
+   */
+  private void doFilterStackDefaults(Configuration configuration, Map<String, AdvisedConfiguration> advisedConfigurations) {
+    Blueprint blueprint = clusterTopology.getBlueprint();
+    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
+    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
+    for (Map.Entry<String, AdvisedConfiguration> adConfEntry : advisedConfigurations.entrySet()) {
+      AdvisedConfiguration advisedConfiguration = adConfEntry.getValue();
+      if (stackDefaultProps.containsKey(adConfEntry.getKey())) {
+        Map<String, String> defaultProps = stackDefaultProps.get(adConfEntry.getKey());
+        Map<String, String> outFilteredProps = Maps.filterKeys(advisedConfiguration.getProperties(),
+          Predicates.not(Predicates.in(defaultProps.keySet())));
+        advisedConfiguration.getProperties().keySet().removeAll(outFilteredProps.keySet());
+
+        if (advisedConfiguration.getPropertyValueAttributes() != null) {
+          Map<String, ValueAttributesInfo> outFilteredValueAttrs = Maps.filterKeys(advisedConfiguration.getPropertyValueAttributes(),
+            Predicates.not(Predicates.in(defaultProps.keySet())));
+          advisedConfiguration.getPropertyValueAttributes().keySet().removeAll(outFilteredValueAttrs.keySet());
+        }
+      } else {
+        advisedConfiguration.getProperties().clear();
+      }
+    }
+  }
+
+  /**
+   * Update configuration properties based on advised configuration properties.
+   * @param configuration configuration being processed
+   * @param configType type of configuration. e.g.: yarn-site
+   * @param advisedConfig advised configuration instance
+   * @param configTypesUpdated updated config types
+   */
+  private void doReplaceProperties(Configuration configuration, String configType,
+                                   AdvisedConfiguration advisedConfig, Set<String> configTypesUpdated) {
+    for (Map.Entry<String, String> propEntry : advisedConfig.getProperties().entrySet()) {
+      configuration.setProperty(configType, propEntry.getKey(), propEntry.getValue());
+      configTypesUpdated.add(configType);
+    }
+  }
+
+  /**
+   * Remove properties that are flagged with the 'delete' value attribute.
+   * @param configuration configuration being processed
+   * @param configType type of configuration. e.g.: yarn-site
+   * @param advisedConfig advised configuration instance
+   * @param configTypesUpdated updated config types
+   */
+  private void doRemovePropertiesIfNeeded(Configuration configuration,
+                                          String configType, AdvisedConfiguration advisedConfig, Set<String> configTypesUpdated) {
+    if (advisedConfig.getPropertyValueAttributes() != null) {
+      for (Map.Entry<String, ValueAttributesInfo> valueAttrEntry :
+        advisedConfig.getPropertyValueAttributes().entrySet()) {
+        if ("true".equalsIgnoreCase(valueAttrEntry.getValue().getDelete())) {
+          configuration.removeProperty(configType, valueAttrEntry.getKey());
+          configTypesUpdated.add(configType);
+        }
+      }
+    }
+  }
+
+  /**
    * Creates a Collection of PropertyUpdater maps that will handle the configuration
    *   update for this cluster.
    *

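As a standalone illustration (not part of this commit; the class name and property values are made up), the sketch below shows the Guava filtering used by doFilterStackDefaults above: advised properties whose keys do not appear among the stack defaults are dropped before the recommendations are applied under ONLY_STACK_DEFAULTS_APPLY. The filtered keys are copied into a new set before removal so the backing map is not mutated through the live filtered view.

import com.google.common.base.Predicates;
import com.google.common.collect.Maps;

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class StackDefaultsFilterSketch {
  public static void main(String[] args) {
    // properties recommended by the stack advisor for one config type
    Map<String, String> advisedProperties = new HashMap<String, String>();
    advisedProperties.put("yarn.nodemanager.resource.memory-mb", "4096"); // also a stack default
    advisedProperties.put("only.recommended.property", "x");              // not a stack default

    // keys the stack declares as defaults for the same config type
    Set<String> stackDefaultKeys =
        new HashSet<String>(Collections.singletonList("yarn.nodemanager.resource.memory-mb"));

    // keep only advised keys that are also stack defaults
    Map<String, String> notInStackDefaults =
        Maps.filterKeys(advisedProperties, Predicates.not(Predicates.in(stackDefaultKeys)));
    advisedProperties.keySet().removeAll(new HashSet<String>(notInStackDefaults.keySet()));

    System.out.println(advisedProperties); // {yarn.nodemanager.resource.memory-mb=4096}
  }
}
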
http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
index cd28aac..84c13b9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
@@ -341,6 +341,7 @@ public class ClusterResourceProvider extends AbstractControllerResourceProvider
     baseUnsupported.remove("default_password");
     baseUnsupported.remove("configurations");
     baseUnsupported.remove("credentials");
+    baseUnsupported.remove("config_recommendation_strategy");
 
     return checkConfigPropertyIds(baseUnsupported, "Clusters");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
index 9716abe..56c4b76 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
@@ -19,9 +19,11 @@ package org.apache.ambari.server.controller.internal;
 
 import com.google.common.base.Enums;
 import com.google.common.base.Strings;
+import com.google.common.base.Optional;
 import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.security.encryption.CredentialStoreType;
 import org.apache.ambari.server.stack.NoSuchStackException;
+import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.ConfigurationFactory;
 import org.apache.ambari.server.topology.Credential;
@@ -87,6 +89,11 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
   public static final String DEFAULT_PASSWORD_PROPERTY = "default_password";
 
   /**
+   * configuration recommendation strategy property name
+   */
+  public static final String CONFIG_RECOMMENDATION_STRATEGY = "config_recommendation_strategy";
+
+  /**
    * configuration factory
    */
   private static ConfigurationFactory configurationFactory = new ConfigurationFactory();
@@ -103,6 +110,11 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
 
   private Map<String, Credential> credentialsMap;
 
+  /**
+   * configuration recommendation strategy
+   */
+  private final ConfigRecommendationStrategy configRecommendationStrategy;
+
   private final static Logger LOG = LoggerFactory.getLogger(ProvisionClusterRequest.class);
 
   /**
@@ -114,7 +126,7 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
   public ProvisionClusterRequest(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws
     InvalidTopologyTemplateException {
     setClusterName(String.valueOf(properties.get(
-        ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID)));
+      ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID)));
 
     if (properties.containsKey(DEFAULT_PASSWORD_PROPERTY)) {
       defaultPassword = String.valueOf(properties.get(DEFAULT_PASSWORD_PROPERTY));
@@ -138,6 +150,8 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
     parseHostGroupInfo(properties);
 
     this.credentialsMap = parseCredentials(properties);
+
+    this.configRecommendationStrategy = parseConfigRecommendationStrategy(properties);
   }
 
   private Map<String, Credential> parseCredentials(Map<String, Object> properties) throws
@@ -184,6 +198,10 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
     this.clusterName = clusterName;
   }
 
+  public ConfigRecommendationStrategy getConfigRecommendationStrategy() {
+    return configRecommendationStrategy;
+  }
+
   @Override
   public Long getClusterId() {
     return clusterId;
@@ -344,4 +362,27 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
           "Host group '%s' must contain at least one 'hosts/fqdn' or a 'host_count' value", name));
     }
   }
+
+  /**
+   * Parse the config recommendation strategy. Throws an exception if the value is not valid.
+   * The default value is {@link ConfigRecommendationStrategy#NEVER_APPLY}.
+   * @param properties request properties
+   * @throws InvalidTopologyTemplateException if the specified config recommendation strategy property fails validation
+   */
+  private ConfigRecommendationStrategy parseConfigRecommendationStrategy(Map<String, Object> properties)
+    throws InvalidTopologyTemplateException {
+    if (properties.containsKey(CONFIG_RECOMMENDATION_STRATEGY)) {
+      String configRecommendationStrategy = String.valueOf(properties.get(CONFIG_RECOMMENDATION_STRATEGY));
+      Optional<ConfigRecommendationStrategy> configRecommendationStrategyOpt =
+        Enums.getIfPresent(ConfigRecommendationStrategy.class, configRecommendationStrategy);
+      if (!configRecommendationStrategyOpt.isPresent()) {
+        throw new InvalidTopologyTemplateException(String.format(
+          "Config recommendation stratagy is not supported: %s", configRecommendationStrategy));
+      }
+      return configRecommendationStrategyOpt.get();
+    } else {
+      // default
+      return ConfigRecommendationStrategy.NEVER_APPLY;
+    }
+  }
 }

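A minimal standalone sketch (not part of this commit; the class name is made up and the nested enum is a local stand-in for org.apache.ambari.server.topology.ConfigRecommendationStrategy) of the Guava Enums.getIfPresent lookup that parseConfigRecommendationStrategy above relies on: recognized strings map onto the enum, unrecognized strings are rejected, and an absent property falls back to NEVER_APPLY.

import com.google.common.base.Enums;
import com.google.common.base.Optional;

public class StrategyParsingSketch {
  enum ConfigRecommendationStrategy { ALWAYS_APPLY, NEVER_APPLY, ONLY_STACK_DEFAULTS_APPLY }

  static ConfigRecommendationStrategy parse(String requestValue) {
    if (requestValue == null) {
      return ConfigRecommendationStrategy.NEVER_APPLY; // default when the property is absent
    }
    Optional<ConfigRecommendationStrategy> parsed =
        Enums.getIfPresent(ConfigRecommendationStrategy.class, requestValue);
    if (!parsed.isPresent()) {
      // the real code throws InvalidTopologyTemplateException here
      throw new IllegalArgumentException("Config recommendation strategy is not supported: " + requestValue);
    }
    return parsed.get();
  }

  public static void main(String[] args) {
    System.out.println(parse("ONLY_STACK_DEFAULTS_APPLY")); // ONLY_STACK_DEFAULTS_APPLY
    System.out.println(parse(null));                        // NEVER_APPLY
  }
}
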
http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/topology/AdvisedConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AdvisedConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AdvisedConfiguration.java
new file mode 100644
index 0000000..84b3655
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AdvisedConfiguration.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+import java.util.Map;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+
+public class AdvisedConfiguration {
+  private final Map<String, String> properties;
+  private final Map<String, ValueAttributesInfo> propertyValueAttributes;
+
+  public AdvisedConfiguration(Map<String, String> properties,
+                              Map<String, ValueAttributesInfo> propertyValueAttributes) {
+    this.properties = properties;
+    this.propertyValueAttributes = propertyValueAttributes;
+  }
+
+  public Map<String, String> getProperties() {
+    return properties;
+  }
+
+  public Map<String, ValueAttributesInfo> getPropertyValueAttributes() {
+    return propertyValueAttributes;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index 1ebde17..1a650ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -19,6 +19,7 @@
 package org.apache.ambari.server.topology;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorBlueprintProcessor;
 import org.apache.ambari.server.controller.ClusterRequest;
 import org.apache.ambari.server.controller.ConfigurationRequest;
 import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
@@ -47,15 +48,18 @@ public class ClusterConfigurationRequest {
   private AmbariContext ambariContext;
   private ClusterTopology clusterTopology;
   private BlueprintConfigurationProcessor configurationProcessor;
+  private StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor;
   private Stack stack;
 
-  public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology clusterTopology, boolean setInitial) {
+  public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology clusterTopology, boolean setInitial,
+                                     StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor) {
     this.ambariContext = ambariContext;
     this.clusterTopology = clusterTopology;
     Blueprint blueprint = clusterTopology.getBlueprint();
     this.stack = blueprint.getStack();
     // set initial configuration (not topology resolved)
     this.configurationProcessor = new BlueprintConfigurationProcessor(clusterTopology);
+    this.stackAdvisorBlueprintProcessor = stackAdvisorBlueprintProcessor;
     if (setInitial) {
       setConfigurationsOnCluster(clusterTopology, TopologyManager.INITIAL_CONFIG_TAG, Collections.<String>emptySet());
     }
@@ -69,7 +73,12 @@ public class ClusterConfigurationRequest {
   public void process() throws AmbariException, ConfigurationTopologyException {
     // this will update the topo cluster config and all host group configs in the cluster topology
     Set<String> updatedConfigTypes = Collections.emptySet();
+
     try {
+      // obtain recommended configurations before config updates
+      if (!ConfigRecommendationStrategy.NEVER_APPLY.equals(this.clusterTopology.getConfigRecommendationStrategy())) {
+        stackAdvisorBlueprintProcessor.adviseConfiguration(this.clusterTopology);
+      }
       updatedConfigTypes =
         configurationProcessor.doUpdateForClusterCreate();
     } catch (ConfigurationTopologyException e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
index 284a913..0ac2e2a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
@@ -153,6 +153,12 @@ public interface ClusterTopology {
    */
   public RequestStatusResponse startHost(String hostName);
 
+  public void setConfigRecommendationStrategy(ConfigRecommendationStrategy strategy);
+
+  public ConfigRecommendationStrategy getConfigRecommendationStrategy();
+
+  public Map<String, AdvisedConfiguration> getAdvisedConfigurations();
+
   //todo: don't expose ambari context from this class
   public AmbariContext getAmbariContext();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index 5b716ae..687c2e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -45,6 +45,8 @@ public class ClusterTopologyImpl implements ClusterTopology {
   //todo: for example: provision using bp1 and scale using bp2
   private Blueprint blueprint;
   private Configuration configuration;
+  private ConfigRecommendationStrategy configRecommendationStrategy;
+  private Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<String, AdvisedConfiguration>();
   private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<String, HostGroupInfo>();
   private final AmbariContext ambariContext;
 
@@ -234,6 +236,21 @@ public class ClusterTopologyImpl implements ClusterTopology {
   }
 
   @Override
+  public void setConfigRecommendationStrategy(ConfigRecommendationStrategy strategy) {
+    this.configRecommendationStrategy = strategy;
+  }
+
+  @Override
+  public ConfigRecommendationStrategy getConfigRecommendationStrategy() {
+    return this.configRecommendationStrategy;
+  }
+
+  @Override
+  public Map<String, AdvisedConfiguration> getAdvisedConfigurations() {
+    return this.advisedConfigurations;
+  }
+
+  @Override
   public AmbariContext getAmbariContext() {
     return ambariContext;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigRecommendationStrategy.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigRecommendationStrategy.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigRecommendationStrategy.java
new file mode 100644
index 0000000..7650dee
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigRecommendationStrategy.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+public enum ConfigRecommendationStrategy {
+  /**
+   *  Configuration recommendations are always applied, overriding stack defaults and
+   *  configuration defined by the user in the Blueprint and/or Cluster Creation Template.
+   */
+  ALWAYS_APPLY,
+  /**
+   * Configuration recommendations are ignored with this option, both for stack defaults
+   * and configuration defined by the user in the Blueprint and/or Cluster Creation Template.
+   */
+  NEVER_APPLY,
+  /**
+   *  Configuration recommendations are always applied for properties listed as stack defaults,
+   *  but not for configurations defined by the user in the Blueprint and/or Cluster Creation Template.
+   */
+  ONLY_STACK_DEFAULTS_APPLY;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 26d8c3e..480c2ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -22,6 +22,7 @@ import com.google.inject.Singleton;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.Request;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorBlueprintProcessor;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
 import org.apache.ambari.server.controller.internal.CredentialResourceProvider;
@@ -82,6 +83,7 @@ public class TopologyManager {
   //todo: inject
   private static LogicalRequestFactory logicalRequestFactory = new LogicalRequestFactory();
   private static AmbariContext ambariContext = new AmbariContext();
+  private static StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor;
 
   private final Object initializationLock = new Object();
 
@@ -148,15 +150,17 @@ public class TopologyManager {
     long clusterId = ambariContext.getClusterId(clusterName);
     topology.setClusterId(clusterId);
     request.setClusterId(clusterId);
+    // set recommendation strategy
+    topology.setConfigRecommendationStrategy(request.getConfigRecommendationStrategy());
     // persist request after it has successfully validated
     PersistedTopologyRequest persistedRequest = persistedState.persistTopologyRequest(request);
 
     clusterTopologyMap.put(clusterId, topology);
 
-    addClusterConfigRequest(topology, new ClusterConfigurationRequest(ambariContext, topology, true));
-
     final Stack stack = topology.getBlueprint().getStack();
 
+    addClusterConfigRequest(topology, new ClusterConfigurationRequest(
+      ambariContext, topology, true, stackAdvisorBlueprintProcessor));
     LogicalRequest logicalRequest = processRequest(persistedRequest, topology, provisionId);
 
     //todo: this should be invoked as part of a generic lifecycle event which could possibly
@@ -274,7 +278,7 @@ public class TopologyManager {
     // this registers/updates all request host groups
     topology.update(request);
     return getRequestStatus(processRequest(persistedRequest, topology,
-        ambariContext.getNextRequestId()).getRequestId());
+      ambariContext.getNextRequestId()).getRequestId());
   }
 
   public void onHostRegistered(HostImpl host, boolean associatedWithCluster) {
@@ -446,7 +450,7 @@ public class TopologyManager {
   }
 
   private LogicalRequest processRequest(PersistedTopologyRequest request, ClusterTopology topology, Long requestId)
-      throws AmbariException {
+    throws AmbariException {
 
     LOG.info("TopologyManager.processRequest: Entering");
 
@@ -599,7 +603,8 @@ public class TopologyManager {
         configChecked = true;
         if (!ambariContext.doesConfigurationWithTagExist(topology.getClusterId(), TOPOLOGY_RESOLVED_TAG)) {
           LOG.info("TopologyManager.replayRequests: no config with TOPOLOGY_RESOLVED found, adding cluster config request");
-          addClusterConfigRequest(topology, new ClusterConfigurationRequest(ambariContext, topology, false));
+          addClusterConfigRequest(topology, new ClusterConfigurationRequest(
+            ambariContext, topology, false, stackAdvisorBlueprintProcessor));
         }
       }
     }
@@ -637,6 +642,10 @@ public class TopologyManager {
     executor.execute(new ConfigureClusterTask(topology, configurationRequest));
   }
 
+  public static void init(StackAdvisorBlueprintProcessor instance) {
+    stackAdvisorBlueprintProcessor = instance;
+  }
+
   private class ConfigureClusterTask implements Runnable {
     private ClusterConfigurationRequest configRequest;
     private ClusterTopology topology;

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
new file mode 100644
index 0000000..2baa505
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services.stackadvisor;
+
+import com.google.common.collect.Maps;
+import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.HostGroup;
+import org.apache.ambari.server.topology.HostGroupImpl;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.AdvisedConfiguration;
+import org.apache.ambari.server.topology.BlueprintImpl;
+import org.apache.ambari.server.topology.HostGroupInfo;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import static org.junit.Assert.fail;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class StackAdvisorBlueprintProcessorTest {
+  private StackAdvisorBlueprintProcessor underTest = new StackAdvisorBlueprintProcessor();
+
+  private ClusterTopology clusterTopology = createMock(ClusterTopology.class);
+  private BlueprintImpl blueprint = createMock(BlueprintImpl.class);
+  private Stack stack = createMock(Stack.class);
+  private HostGroup hostGroup = createMock(HostGroup.class);
+  private Configuration configuration = createMock(Configuration.class);
+
+  private static StackAdvisorHelper stackAdvisorHelper = createMock(StackAdvisorHelper.class);
+
+  @BeforeClass
+  public static void initClass() {
+    StackAdvisorBlueprintProcessor.init(stackAdvisorHelper);
+  }
+
+  @Before
+  public void setUp() {
+    reset(clusterTopology, blueprint, stack, stackAdvisorHelper);
+  }
+
+  @Test
+  public void testAdviseConfiguration() throws StackAdvisorException, ConfigurationTopologyException {
+    // GIVEN
+    Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<String, AdvisedConfiguration>();
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(clusterTopology.getHostGroupInfo()).andReturn(createHostGroupInfo()).anyTimes();
+    expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
+    expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
+    expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getVersion()).andReturn("2.3").anyTimes();
+    expect(stack.getName()).andReturn("HDP").anyTimes();
+    expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
+    expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(hostGroup.getComponents()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
+    expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse());
+    expect(configuration.getFullProperties()).andReturn(createProps());
+
+    replay(clusterTopology, blueprint, stack, hostGroup, configuration, stackAdvisorHelper);
+    // WHEN
+    underTest.adviseConfiguration(clusterTopology);
+    // THEN
+    assertTrue(advisedConfigurations.get("core-site").getProperties().containsKey("dummyKey1"));
+    assertTrue(advisedConfigurations.get("core-site").getPropertyValueAttributes().containsKey("dummyKey2"));
+    assertEquals("dummyValue", advisedConfigurations.get("core-site").getProperties().get("dummyKey1"));
+    assertEquals(Boolean.toString(true), advisedConfigurations.get("core-site")
+      .getPropertyValueAttributes().get("dummyKey2").getDelete());
+  }
+
+  @Test
+  public void testAdviseConfigurationWhenConfigurationRecommendFails() throws StackAdvisorException, ConfigurationTopologyException {
+    // GIVEN
+    Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<String, AdvisedConfiguration>();
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(clusterTopology.getHostGroupInfo()).andReturn(createHostGroupInfo()).anyTimes();
+    expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
+    expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
+    expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getVersion()).andReturn("2.3").anyTimes();
+    expect(stack.getName()).andReturn("HDP").anyTimes();
+    expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(hostGroup.getComponents()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
+    expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
+    expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andThrow(new StackAdvisorException("ex"));
+    expect(configuration.getFullProperties()).andReturn(createProps());
+
+    replay(clusterTopology, blueprint, stack, hostGroup, configuration, stackAdvisorHelper);
+    // WHEN
+    try {
+      underTest.adviseConfiguration(clusterTopology);
+      fail("Invalid state");
+    } catch (ConfigurationTopologyException e) {
+      assertEquals(StackAdvisorBlueprintProcessor.RECOMMENDATION_FAILED, e.getMessage());
+    }
+  }
+
+  @Test
+  public void testAdviseConfigurationWhenConfigurationRecommendHasInvalidResponse() throws StackAdvisorException, ConfigurationTopologyException {
+    // GIVEN
+    Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<String, AdvisedConfiguration>();
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(clusterTopology.getHostGroupInfo()).andReturn(createHostGroupInfo()).anyTimes();
+    expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
+    expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
+    expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getVersion()).andReturn("2.3").anyTimes();
+    expect(stack.getName()).andReturn("HDP").anyTimes();
+    expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
+    expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(hostGroup.getComponents()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
+    expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(new RecommendationResponse());
+    expect(configuration.getFullProperties()).andReturn(createProps());
+
+    replay(clusterTopology, blueprint, stack, hostGroup, configuration, stackAdvisorHelper);
+    // WHEN
+    try {
+      underTest.adviseConfiguration(clusterTopology);
+      fail("Invalid state");
+    } catch (ConfigurationTopologyException e) {
+      assertEquals(StackAdvisorBlueprintProcessor.INVALID_RESPONSE, e.getMessage());
+    }
+  }
+
+  private Map<String, Map<String, String>> createProps() {
+    Map<String, Map<String, String>> props = Maps.newHashMap();
+    Map<String, String> siteProps = Maps.newHashMap();
+    siteProps.put("myprop", "myvalue");
+    props.put("core-site", siteProps);
+    return props;
+  }
+
+  private Map<String, HostGroup> createHostGroupMap() {
+    Map<String, HostGroup> hgMap = Maps.newHashMap();
+    hgMap.put("hg1", hostGroup);
+    return hgMap;
+  }
+
+  private Map<String, HostGroupInfo> createHostGroupInfo() {
+    Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<String, HostGroupInfo>();
+    HostGroupInfo hgi1 = new HostGroupInfo("hostGroup1");
+    HostGroupInfo hgi2 = new HostGroupInfo("hostGroup2");
+    hostGroupInfoMap.put("hg1", hgi1);
+    hostGroupInfoMap.put("hg2", hgi2);
+    return hostGroupInfoMap;
+  }
+
+  private RecommendationResponse createRecommendationResponse() {
+    RecommendationResponse response = new RecommendationResponse();
+    RecommendationResponse.Recommendation recommendations = new RecommendationResponse.Recommendation();
+    RecommendationResponse.Blueprint blueprint = new RecommendationResponse.Blueprint();
+    Map<String, RecommendationResponse.BlueprintConfigurations> blueprintConfigurationsMap =
+      new HashMap<String, RecommendationResponse.BlueprintConfigurations>();
+    RecommendationResponse.BlueprintConfigurations blueprintConfig =
+      new RecommendationResponse.BlueprintConfigurations();
+    Map<String, String> properties = new HashMap<String, String>();
+    properties.put("dummyKey1", "dummyValue");
+    blueprintConfig.setProperties(properties);
+    Map<String, ValueAttributesInfo> propAttributes = new HashMap<String, ValueAttributesInfo>();
+    ValueAttributesInfo valueAttributesInfo = new ValueAttributesInfo();
+    valueAttributesInfo.setDelete("true");
+    propAttributes.put("dummyKey2", valueAttributesInfo);
+    blueprintConfig.setPropertyAttributes(propAttributes);
+    blueprintConfigurationsMap.put("core-site", blueprintConfig);
+    blueprint.setConfigurations(blueprintConfigurationsMap);
+    recommendations.setBlueprint(blueprint);
+    response.setRecommendations(recommendations);
+    return response;
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 34b72b6..f638265 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -21,8 +21,12 @@ package org.apache.ambari.server.controller.internal;
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertNull;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.fail;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.AdvisedConfiguration;
+import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
@@ -1212,7 +1216,7 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.http.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.http.address"));
     assertEquals("hdfs config property not exported properly",
         createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.https.address"));
     assertEquals("hdfs config property not exported properly",
@@ -1505,9 +1509,9 @@ public class BlueprintConfigurationProcessorTest {
 
     // verify that the oozie properties that can refer to an external DB are not included in the export
     assertFalse("oozie_existing_mysql_host should not have been present in the exported configuration",
-        oozieEnvProperties.containsKey("oozie_existing_mysql_host"));
+      oozieEnvProperties.containsKey("oozie_existing_mysql_host"));
     assertFalse("oozie.service.JPAService.jdbc.url should not have been present in the exported configuration",
-        oozieSiteProperties.containsKey("oozie.service.JPAService.jdbc.url"));
+      oozieSiteProperties.containsKey("oozie.service.JPAService.jdbc.url"));
 
     // verify that oozie-env heapsize properties are not removed from the configuration
     assertEquals("oozie_heapsize should have been included in exported configuration",
@@ -1589,11 +1593,11 @@ public class BlueprintConfigurationProcessorTest {
         createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
         webHCatSiteProperties.get("templeton.zookeeper.hosts"));
     assertEquals("yarn-site zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
-        yarnSiteProperties.get("hadoop.registry.zk.quorum"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      yarnSiteProperties.get("hadoop.registry.zk.quorum"));
     assertEquals("slider-client zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
-        sliderClientProperties.get("slider.zookeeper.quorum"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      sliderClientProperties.get("slider.zookeeper.quorum"));
     assertEquals("kafka zookeeper config not properly exported",
         createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
         kafkaBrokerProperties.get("zookeeper.connect"));
@@ -1750,7 +1754,7 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("Property was incorrectly exported",
         "%HOSTGROUP::" + expectedHostGroupName + "%", properties.get("storm.zookeeper.servers"));
     assertEquals("Property with undefined host was incorrectly exported",
-        "undefined", properties.get("nimbus.childopts"));
+      "undefined", properties.get("nimbus.childopts"));
     assertEquals("Property with undefined host was incorrectly exported",
         "some other info, undefined, more info" , properties.get("worker.childopts"));
   }
@@ -2480,8 +2484,8 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive_hostname",
-        expectedPropertyValue,
-        hiveEnvProperties.get("hive_hostname"));
+      expectedPropertyValue,
+      hiveEnvProperties.get("hive_hostname"));
 
     assertEquals("Unexpected config update for hive.metastore.uris",
         expectedMetaStoreURIs,
@@ -2729,7 +2733,7 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForBlueprintExport();
 
     assertEquals("oozie property not updated correctly",
-        createExportedHostName(expectedHostGroupName, expectedPortNum), oozieSiteProperties.get("oozie.base.url"));
+      createExportedHostName(expectedHostGroupName, expectedPortNum), oozieSiteProperties.get("oozie.base.url"));
     assertEquals("oozie property not updated correctly",
       createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
     assertEquals("oozie property not updated correctly",
@@ -2790,13 +2794,13 @@ public class BlueprintConfigurationProcessorTest {
 
     // verify that the properties with hostname information were correctly preserved
     assertEquals("Yarn Log Server URL was incorrectly updated",
-        "http://" + expectedHostName + ":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
+      "http://" + expectedHostName + ":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
     assertEquals("Yarn ResourceManager hostname was incorrectly exported",
-        expectedHostName, yarnSiteProperties.get("yarn.resourcemanager.hostname"));
+      expectedHostName, yarnSiteProperties.get("yarn.resourcemanager.hostname"));
     assertEquals("Yarn ResourceManager tracker address was incorrectly updated",
         createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly updated",
         createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly updated",
@@ -3739,7 +3743,7 @@ public class BlueprintConfigurationProcessorTest {
       expectedHostName + ":" + expectedPortNum, falconStartupProperties.get("*.broker.url"));
 
     assertEquals("Falcon Kerberos Principal property not properly exported",
-        "falcon/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
+      "falcon/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
 
     assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
       "HTTP/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
@@ -4136,7 +4140,7 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertFalse("hbase.coprocessor.regionserver.classes should have been filtered out of configuration",
-                hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
+      hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
     assertTrue("hbase.coprocessor.master.classes should not have been filtered out of configuration",
                hbaseSiteProperties.containsKey("hbase.coprocessor.master.classes"));
     assertTrue("hbase.coprocessor.region.classes should not have been filtered out of configuration",
@@ -4329,7 +4333,7 @@ public class BlueprintConfigurationProcessorTest {
       "localhost", stormSiteProperties.get("supervisor.childopts"));
 
     assertEquals("nimbus startup settings not properly handled by cluster create",
-        "localhost", stormSiteProperties.get("nimbus.childopts"));
+      "localhost", stormSiteProperties.get("nimbus.childopts"));
 
     assertEquals("Kafka ganglia host property not properly handled by cluster create",
       "localhost", kafkaBrokerProperties.get("kafka.ganglia.metrics.host"));
@@ -4664,9 +4668,9 @@ public class BlueprintConfigurationProcessorTest {
         hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
     // verify that the Blueprint config processor has not overridden
     // the user's configuration to determine the active and
@@ -5206,6 +5210,178 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("https://host1:99999", clusterConfig.getPropertyValue("hive-site", "atlas.rest.address"));
   }
 
+  @Test
+  public void testRecommendConfiguration_applyStackDefaultsOnly() throws Exception {
+    // GIVEN
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> coreSiteMap = new HashMap<String, String>();
+    properties.put("core-site", coreSiteMap);
+    coreSiteMap.put("fs.default.name", expectedHostName + ":" + expectedPortNum);
+    coreSiteMap.put("fs.defaultFS", "hdfs://" + expectedHostName + ":" + expectedPortNum);
+    coreSiteMap.put("fs.stackDefault.key2", "dummyValue");
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton(expectedHostGroupName));
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    Configuration parentConfig = new Configuration(parentProperties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), createStackDefaults());
+
+    Configuration clusterConfig = new Configuration(properties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentConfig);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
+    topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    reset(stack);
+    expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
+    replay(stack);
+    // WHEN
+    configProcessor.doUpdateForClusterCreate();
+    // THEN
+    assertEquals(expectedHostName + ":" + expectedPortNum, clusterConfig.getPropertyValue("core-site", "fs.default.name"));
+    assertEquals("stackDefaultUpgraded", clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key1"));
+    // verify that fs.stackDefault.key2 is removed
+    assertNull(clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key2"));
+    // verify that fs.notStackDefault is filtered out
+    assertNull(clusterConfig.getPropertyValue("core-site", "fs.notStackDefault"));
+  }
+
+  @Test
+  public void testRecommendConfiguration_applyAlways() throws Exception {
+    // GIVEN
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> coreSiteMap = new HashMap<String, String>();
+    properties.put("core-site", coreSiteMap);
+    coreSiteMap.put("fs.default.name", expectedHostName + ":" + expectedPortNum);
+    coreSiteMap.put("fs.defaultFS", "hdfs://" + expectedHostName + ":" + expectedPortNum);
+    coreSiteMap.put("fs.stackDefault.key2", "dummyValue");
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton(expectedHostGroupName));
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), createStackDefaults());
+
+    Configuration clusterConfig = new Configuration(properties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
+    topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ALWAYS_APPLY);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    // WHEN
+    configProcessor.doUpdateForClusterCreate();
+    // THEN
+    assertEquals(expectedHostName + ":" + expectedPortNum, clusterConfig.getPropertyValue("core-site","fs.default.name"));
+    assertEquals("stackDefaultUpgraded", clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key1"));
+    // verify that fs.stackDefault.key2 is removed
+    assertNull(clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key2"));
+    // verify that fs.notStackDefault is not filtered out
+    assertNotNull(clusterConfig.getPropertyValue("core-site", "fs.notStackDefault"));
+    assertEquals(1, topology.getAdvisedConfigurations().size());
+  }
+
+  @Test
+  public void testRecommendConfiguration_neverApply() throws Exception {
+    // GIVEN
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> coreSiteMap = new HashMap<String, String>();
+    properties.put("core-site", coreSiteMap);
+    coreSiteMap.put("fs.default.name", expectedHostName + ":" + expectedPortNum);
+    coreSiteMap.put("fs.defaultFS", "hdfs://" + expectedHostName + ":" + expectedPortNum);
+    coreSiteMap.put("fs.stackDefault.key2", "dummyValue");
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton(expectedHostGroupName));
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), createStackDefaults());
+
+    Configuration clusterConfig = new Configuration(properties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
+    topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    // WHEN
+    configProcessor.doUpdateForClusterCreate();
+    // THEN
+    assertEquals(expectedHostName + ":" + expectedPortNum, clusterConfig.getPropertyValue("core-site", "fs.default.name"));
+    // verify that no values were added, upgraded or removed
+    assertNull(clusterConfig.getPropertyValue("core-site", "fs.notStackDefault"));
+    assertEquals("stackDefaultValue1", clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key1"));
+    assertNotNull(clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key2"));
+  }
+
+  private Map<String, AdvisedConfiguration> createAdvisedConfigMap() {
+    Map<String, AdvisedConfiguration> advMap = new HashMap<String, AdvisedConfiguration>();
+    Map<String, String> confProp = new HashMap<String, String>();
+    confProp.put("fs.stackDefault.key1", "stackDefaultUpgraded");
+    confProp.put("fs.notStackDefault", "notStackDefault");
+    Map<String, ValueAttributesInfo> valueAttributesInfoMap = new HashMap<String, ValueAttributesInfo>();
+    ValueAttributesInfo vaInfo = new ValueAttributesInfo();
+    vaInfo.setDelete("true");
+    valueAttributesInfoMap.put("fs.stackDefault.key2", vaInfo);
+    advMap.put("core-site", new AdvisedConfiguration(confProp, valueAttributesInfoMap));
+    return advMap;
+  }
+
 
   private static String createExportedAddress(String expectedPortNum, String expectedHostGroupName) {
     return createExportedHostName(expectedHostGroupName, expectedPortNum);
@@ -5224,6 +5400,19 @@ public class BlueprintConfigurationProcessorTest {
     return hostName + ":" + portNumber;
   }
 
+  private Configuration createStackDefaults() {
+    Map<String, Map<String, String>> stackDefaultProps =
+      new HashMap<String, Map<String, String>>();
+    Map<String, String> coreSiteDefault = new HashMap<String, String>();
+    coreSiteDefault.put("fs.stackDefault.key1", "stackDefaultValue1");
+    coreSiteDefault.put("fs.stackDefault.key2", "stackDefaultValue2");
+    stackDefaultProps.put("core-site", coreSiteDefault);
+
+    Map<String, Map<String, Map<String, String>>> stackDefaultAttributes =
+      new HashMap<String, Map<String, Map<String, String>>>();
+    return new Configuration(stackDefaultProps, stackDefaultAttributes);
+  }
+
   private ClusterTopology createClusterTopology(Blueprint blueprint, Configuration configuration,
                                                 Collection<TestHostGroup> hostGroups)
       throws InvalidTopologyException {
@@ -5266,7 +5455,11 @@ public class BlueprintConfigurationProcessorTest {
 
     replay(bp);
 
-    return new ClusterTopologyImpl(ambariConext, 1L, blueprint, configuration, hostGroupInfo);
+    ClusterTopology topology = new ClusterTopologyImpl(
+      ambariConext, 1L, blueprint, configuration, hostGroupInfo);
+    topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY);
+
+    return topology;
   }
 
   private class TestHostGroup {

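Note on the three tests added above: they cover the three values of the new ConfigRecommendationStrategy enum. NEVER_APPLY leaves the advisor output unused, ONLY_STACK_DEFAULTS_APPLY applies it only to properties the stack itself defines (so fs.notStackDefault is filtered out while fs.stackDefault.key1 is upgraded and fs.stackDefault.key2 is deleted), and ALWAYS_APPLY applies every advised value. The sketch below is a hedged distillation of those assertions; the enum name and the mayApply() helper are illustrative only and do not appear in the commit -- the real enum is org.apache.ambari.server.topology.ConfigRecommendationStrategy and the merge logic lives in BlueprintConfigurationProcessor.

    import java.util.Map;

    // Illustrative only: distills what the three tests assert for the advised "core-site" values.
    enum RecommendationStrategySketch {
      NEVER_APPLY,               // leave user and stack values untouched, ignore advisor output
      ONLY_STACK_DEFAULTS_APPLY, // apply advised values only to properties the stack defines
      ALWAYS_APPLY;              // apply every advised value, including non-stack properties

      boolean mayApply(String property, Map<String, String> stackDefaults) {
        switch (this) {
          case NEVER_APPLY:
            return false;                               // fs.stackDefault.key1 stays "stackDefaultValue1"
          case ONLY_STACK_DEFAULTS_APPLY:
            return stackDefaults.containsKey(property); // fs.notStackDefault is filtered out
          case ALWAYS_APPLY:
          default:
            return true;                                // fs.notStackDefault is applied too
        }
      }
    }

In all three tests the user-supplied fs.default.name survives unchanged, since the advised configuration map never recommends a value for it.
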
http://git-wip-us.apache.org/repos/asf/ambari/blob/c8e138d9/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
index bd4f13d..92e3e1c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
@@ -221,6 +221,7 @@ public class TopologyManagerTest {
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
     expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
+    expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
 
     expect(group1.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes();
     expect(group1.getCardinality()).andReturn("test cardinality").anyTimes();
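
Note on the TopologyManagerTest change above: TopologyManager now asks the provision request for its config recommendation strategy while building the topology, so the existing mock needs the extra expectation. The fragment below is a minimal sketch of that wiring, assuming the mocked request is the ProvisionClusterRequest type touched elsewhere in this commit; it is not code from the patch and abbreviates the rest of the test scaffolding.

    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
    import org.apache.ambari.server.topology.ConfigRecommendationStrategy;

    // Not a test from the commit: just the minimal EasyMock wiring that keeps TopologyManager
    // from seeing a null strategy once it starts querying the provision request for it.
    public class ConfigRecommendationStrategyMockSketch {

      static ProvisionClusterRequest stubRequest() {
        ProvisionClusterRequest request = createNiceMock(ProvisionClusterRequest.class);
        // Mirrors the expectation added to TopologyManagerTest; NEVER_APPLY leaves the
        // existing assertions untouched because no advised values get merged.
        expect(request.getConfigRecommendationStrategy())
            .andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
        replay(request);
        return request;
      }
    }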

