ambari-commits mailing list archives

From: a...@apache.org
Subject: ambari git commit: AMBARI-17599. Autoskip failure support for blueprint deployment (ajit)
Date: Fri, 08 Jul 2016 06:48:46 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk fb12a7793 -> e6b2f1ebe


AMBARI-17599. Autoskip failure support for blueprint deployment (ajit)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e6b2f1eb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e6b2f1eb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e6b2f1eb

Branch: refs/heads/trunk
Commit: e6b2f1ebee8d52aaa4b07478e2b525041c2ea487
Parents: fb12a77
Author: Ajit Kumar <ajit@apache.org>
Authored: Thu Jul 7 23:46:31 2016 -0700
Committer: Ajit Kumar <ajit@apache.org>
Committed: Thu Jul 7 23:46:31 2016 -0700

----------------------------------------------------------------------
 .../AmbariManagementControllerImpl.java         |  21 +-
 .../internal/HostComponentResourceProvider.java |  23 +-
 .../internal/HostResourceProvider.java          |   4 +-
 .../ambari/server/stageplanner/RoleGraph.java   |   2 +
 .../ambari/server/topology/AmbariContext.java   |  18 +-
 .../ambari/server/topology/Blueprint.java       |   5 +
 .../ambari/server/topology/BlueprintImpl.java   |  14 +
 .../ambari/server/topology/ClusterTopology.java |   4 +-
 .../server/topology/ClusterTopologyImpl.java    |   8 +-
 .../ambari/server/topology/HostRequest.java     |  44 ++-
 .../ambari/server/topology/LogicalRequest.java  |  20 +-
 .../apache/ambari/server/topology/Setting.java  |   4 +
 .../internal/RequestResourceProviderTest.java   |   6 +-
 .../server/topology/BlueprintImplTest.java      | 328 +++++--------------
 .../ClusterDeployWithHostsSyspreppedTest.java   |   5 +-
 .../ClusterInstallWithoutStartTest.java         |   3 +-
 .../server/topology/LogicalRequestTest.java     |   1 +
 .../ambari/server/topology/SettingTest.java     |  12 +-
 18 files changed, 212 insertions(+), 310 deletions(-)
----------------------------------------------------------------------
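In short: a blueprint's deployment settings can now carry a skip_failure flag. BlueprintImpl.shouldSkipFailure() reads it, LogicalRequest/HostRequest propagate it into the INITIAL_INSTALL and INITIAL_START request properties, and the generated stages are marked skippable and auto-skip-failure-supported. The standalone sketch below (plain Java, not Ambari code; it only mirrors the lookup added in BlueprintImpl.shouldSkipFailure and the constant name added in Setting.java) shows how the setting value is interpreted:

    import java.util.*;

    public class SkipFailureLookupSketch {
      // Constant name mirrors Setting.SETTING_NAME_SKIP_FAILURE added in this patch.
      static final String SETTING_NAME_SKIP_FAILURE = "skip_failure";

      // Mirrors BlueprintImpl.shouldSkipFailure(): the flag is on only when some
      // deployment_settings entry explicitly sets skip_failure to "true" (case-insensitive).
      static boolean shouldSkipFailure(Set<Map<String, String>> deploymentSettings) {
        if (deploymentSettings == null) {
          return false;
        }
        for (Map<String, String> setting : deploymentSettings) {
          if (setting.containsKey(SETTING_NAME_SKIP_FAILURE)) {
            return "true".equalsIgnoreCase(setting.get(SETTING_NAME_SKIP_FAILURE));
          }
        }
        return false;
      }

      public static void main(String[] args) {
        Map<String, String> deployment = new HashMap<>();
        deployment.put(SETTING_NAME_SKIP_FAILURE, "true");
        System.out.println(shouldSkipFailure(Collections.<Map<String, String>>singleton(deployment))); // true
        System.out.println(shouldSkipFailure(Collections.<Map<String, String>>emptySet()));            // false
      }
    }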


http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index aaf69df..e0528a0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -185,6 +185,7 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgre
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostUpgradeEvent;
+import org.apache.ambari.server.topology.Setting;
 import org.apache.ambari.server.utils.SecretReference;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.collections.CollectionUtils;
@@ -2083,15 +2084,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
                                 Map<String, Map<String, String>> configTags,
                                 RoleCommand roleCommand,
                                 Map<String, String> commandParamsInp,
-                                ServiceComponentHostEvent event
+                                ServiceComponentHostEvent event,
+                                boolean skipFailure
                                 )
                                 throws AmbariException {
 
     String serviceName = scHost.getServiceName();
 
     stage.addHostRoleExecutionCommand(scHost.getHost(),
-        Role.valueOf(scHost.getServiceComponentName()), roleCommand, event, cluster, serviceName,
-        false, false);
+        Role.valueOf(scHost.getServiceComponentName()), roleCommand, event, cluster, serviceName, false, skipFailure);
 
     String componentName = scHost.getServiceComponentName();
     String hostname = scHost.getHostName();
@@ -2459,9 +2460,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       Stage stage = createNewStage(requestStages.getLastStageId(), cluster,
           requestStages.getId(), requestProperties.get(REQUEST_CONTEXT_PROPERTY),
           clusterHostInfoJson, "{}", hostParamsJson);
+      boolean skipFailure = false;
+      if (requestProperties.containsKey(Setting.SETTING_NAME_SKIP_FAILURE) && requestProperties.get(Setting.SETTING_NAME_SKIP_FAILURE).equalsIgnoreCase("true")) {
+        skipFailure = true;
+      }
+      stage.setAutoSkipFailureSupported(skipFailure);
+      stage.setSkippable(skipFailure);
 
-      Collection<ServiceComponentHost> componentsToEnableKerberos = new ArrayList<ServiceComponentHost>();
-      Set<String> hostsToForceKerberosOperations = new HashSet<String>();
+      Collection<ServiceComponentHost> componentsToEnableKerberos = new ArrayList<>();
+      Set<String> hostsToForceKerberosOperations = new HashSet<>();
 
       for (String compName : changedScHosts.keySet()) {
         for (State newState : changedScHosts.get(compName).keySet()) {
@@ -2702,7 +2709,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               scHost.setState(State.INSTALLED);
             } else {
               createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags,
-                roleCommand, requestParameters, event);
+                roleCommand, requestParameters, event, skipFailure);
             }
 
           }
@@ -2820,7 +2827,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         new TreeMap<String, Map<String, Map<String, String>>>();
 
     createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags,
-                     roleCommand, null, null);
+                     roleCommand, null, null, false);
     ExecutionCommand ec = stage.getExecutionCommands().get(scHost.getHostName()).get(0).getExecutionCommand();
 
     // createHostAction does not take a hostLevelParams but creates one
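
The hunk above keys the behavior off a per-request property: when the request properties contain skip_failure=true, the stage is marked both skippable and auto-skip-failure-supported before host actions are added. A minimal standalone illustration of that decision (plain Java; resolveSkipFailure and the sample map are illustrative, only the skip_failure key and the case-insensitive "true" comparison come from the patch):

    import java.util.*;

    public class RequestSkipFailureSketch {
      // Key name matches Setting.SETTING_NAME_SKIP_FAILURE added in this patch.
      static final String SKIP_FAILURE_KEY = "skip_failure";

      // Same decision as the controller hunk: only an explicit, case-insensitive "true" enables auto-skip.
      static boolean resolveSkipFailure(Map<String, String> requestProperties) {
        String value = requestProperties.get(SKIP_FAILURE_KEY);
        return value != null && value.equalsIgnoreCase("true");
      }

      public static void main(String[] args) {
        // Shape of the requestInfo map built by HostComponentResourceProvider.install/start
        // in this patch; the host name is a placeholder.
        Map<String, String> requestInfo = new HashMap<>();
        requestInfo.put("context", "Install components on host host1.example.com");
        requestInfo.put("phase", "INITIAL_INSTALL");
        requestInfo.put(SKIP_FAILURE_KEY, "TRUE");
        System.out.println(resolveSkipFailure(requestInfo)); // true (case-insensitive)
        requestInfo.remove(SKIP_FAILURE_KEY);
        System.out.println(resolveSkipFailure(requestInfo)); // false (flag absent)
      }
    }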

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 9d8389a..df2b476 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -64,6 +64,7 @@ import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostDisableEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostRestoreEvent;
+import org.apache.ambari.server.topology.Setting;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
@@ -329,7 +330,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     return unsupportedProperties;
   }
 
-  public RequestStatusResponse install(String cluster, String hostname) throws  SystemException,
+  public RequestStatusResponse install(String cluster, String hostname, boolean skipFailure) throws  SystemException,
       UnsupportedPropertyException, NoSuchParentResourceException {
 
     RequestStageContainer requestStages;
@@ -338,15 +339,16 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     Map<String, Object> installProperties = new HashMap<String, Object>();
 
     installProperties.put(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, "INSTALLED");
-    Map<String, String> requestInfo = new HashMap<String, String>();
+    Map<String, String> requestInfo = new HashMap<>();
     requestInfo.put("context", String.format("Install components on host %s", hostname));
     requestInfo.put("phase", "INITIAL_INSTALL");
+    requestInfo.put(Setting.SETTING_NAME_SKIP_FAILURE, Boolean.toString(skipFailure));
     Request installRequest = PropertyHelper.getUpdateRequest(installProperties, requestInfo);
 
-    Predicate statePredicate = new EqualsPredicate<String>(HOST_COMPONENT_STATE_PROPERTY_ID, "INIT");
-    Predicate clusterPredicate = new EqualsPredicate<String>(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, cluster);
+    Predicate statePredicate = new EqualsPredicate<>(HOST_COMPONENT_STATE_PROPERTY_ID, "INIT");
+    Predicate clusterPredicate = new EqualsPredicate<>(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, cluster);
     // single host
-    Predicate hostPredicate = new EqualsPredicate<String>(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostname);
+    Predicate hostPredicate = new EqualsPredicate<>(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostname);
     //Predicate hostPredicate = new OrPredicate(hostPredicates.toArray(new Predicate[hostPredicates.size()]));
     Predicate hostAndStatePredicate = new AndPredicate(statePredicate, hostPredicate);
     Predicate installPredicate = new AndPredicate(hostAndStatePredicate, clusterPredicate);
@@ -374,18 +376,19 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   public RequestStatusResponse start(String cluster, String hostName) throws  SystemException,
     UnsupportedPropertyException, NoSuchParentResourceException {
 
-    return this.start(cluster, hostName, Collections.<String>emptySet());
+    return this.start(cluster, hostName, Collections.<String>emptySet(), false);
   }
 
-  public RequestStatusResponse start(String cluster, String hostName, Collection<String> installOnlyComponents) throws  SystemException,
+  public RequestStatusResponse start(String cluster, String hostName, Collection<String> installOnlyComponents, boolean skipFailure) throws  SystemException,
       UnsupportedPropertyException, NoSuchParentResourceException {
 
-    Map<String, String> requestInfo = new HashMap<String, String>();
+    Map<String, String> requestInfo = new HashMap<>();
     requestInfo.put("context", String.format("Start components on host %s", hostName));
     requestInfo.put("phase", "INITIAL_START");
+    requestInfo.put(Setting.SETTING_NAME_SKIP_FAILURE, Boolean.toString(skipFailure));
 
-    Predicate clusterPredicate = new EqualsPredicate<String>(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, cluster);
-    Predicate hostPredicate = new EqualsPredicate<String>(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostName);
+    Predicate clusterPredicate = new EqualsPredicate<>(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, cluster);
+    Predicate hostPredicate = new EqualsPredicate<>(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostName);
     //Predicate hostPredicate = new OrPredicate(hostPredicates.toArray(new Predicate[hostPredicates.size()]));
 
     RequestStageContainer requestStages;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index de7f209..6d555e6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -567,7 +567,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
     }
   }
 
-  public RequestStatusResponse install(final String cluster, final String hostname)
+  public RequestStatusResponse install(final String cluster, final String hostname, final boolean skipFailure)
       throws ResourceAlreadyExistsException,
       SystemException,
       NoSuchParentResourceException,
@@ -575,7 +575,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
 
 
     return ((HostComponentResourceProvider) getResourceProvider(Resource.Type.HostComponent)).
-        install(cluster, hostname);
+        install(cluster, hostname, skipFailure);
   }
 
   public RequestStatusResponse start(final String cluster, final String hostname)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java b/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
index c6279bc..c9ab6f9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
@@ -164,6 +164,8 @@ public class RoleGraph {
         origStage.getRequestContext(), origStage.getClusterHostInfo(),
         origStage.getCommandParamsStage(), origStage.getHostParamsStage());
     newStage.setSuccessFactors(origStage.getSuccessFactors());
+    newStage.setSkippable(origStage.isSkippable());
+    newStage.setAutoSkipFailureSupported(origStage.isAutoSkipOnFailureSupported());
     for (RoleGraphNode rgn : stageGraphNodes) {
       for (String host : rgn.getHosts()) {
         newStage.addExecutionCommandWrapper(origStage, host, rgn.getRole());

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 926d253..02aede6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -123,8 +123,10 @@ public class AmbariContext {
 
 
   //todo: change return type to a topology abstraction
-  public HostRoleCommand createAmbariTask(long requestId, long stageId, String component, String host, TaskType type) {
-    HostRoleCommand task = hostRoleCommandFactory.create(host, Role.valueOf(component), null, RoleCommand.valueOf(type.name()));
+  public HostRoleCommand createAmbariTask(long requestId, long stageId, String component, String host,
+                                          TaskType type, boolean skipFailure) {
+    HostRoleCommand task = hostRoleCommandFactory.create(
+            host, Role.valueOf(component), null, RoleCommand.valueOf(type.name()), false, skipFailure);
     task.setStatus(HostRoleStatus.PENDING);
     task.setCommandDetail(String.format("Logical Task: %s component %s on host %s", type.name(), component, host));
     task.setTaskId(nextTaskId.getAndIncrement());
@@ -136,7 +138,7 @@ public class AmbariContext {
 
   //todo: change return type to a topology abstraction
   public HostRoleCommand createAmbariTask(long taskId, long requestId, long stageId,
-                                          String component, String host, TaskType type) {
+                                          String component, String host, TaskType type, boolean skipFailure) {
     synchronized (nextTaskId) {
       if (nextTaskId.get() <= taskId) {
         nextTaskId.set(taskId + 1);
@@ -144,7 +146,7 @@ public class AmbariContext {
     }
 
     HostRoleCommand task = hostRoleCommandFactory.create(
-        host, Role.valueOf(component), null, RoleCommand.valueOf(type.name()));
+        host, Role.valueOf(component), null, RoleCommand.valueOf(type.name()), false, skipFailure);
     task.setStatus(HostRoleStatus.PENDING);
     task.setCommandDetail(String.format("Logical Task: %s component %s on host %s",
         type.name(), component, host));
@@ -347,18 +349,18 @@ public class AmbariContext {
     }
   }
 
-  public RequestStatusResponse installHost(String hostName, String clusterName) {
+  public RequestStatusResponse installHost(String hostName, String clusterName, boolean skipFailure) {
     try {
-      return getHostResourceProvider().install(clusterName, hostName);
+      return getHostResourceProvider().install(clusterName, hostName, skipFailure);
     } catch (Exception e) {
       e.printStackTrace();
       throw new RuntimeException("INSTALL Host request submission failed: " + e, e);
     }
   }
 
-  public RequestStatusResponse startHost(String hostName, String clusterName, Collection<String> installOnlyComponents) {
+  public RequestStatusResponse startHost(String hostName, String clusterName, Collection<String> installOnlyComponents, boolean skipFailure) {
     try {
-      return getHostComponentResourceProvider().start(clusterName, hostName, installOnlyComponents);
+      return getHostComponentResourceProvider().start(clusterName, hostName, installOnlyComponents, skipFailure);
     } catch (Exception e) {
       e.printStackTrace();
       throw new RuntimeException("START Host request submission failed: " + e, e);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
index 0edbaea..8061f37 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
@@ -96,6 +96,11 @@ public interface Blueprint {
    */
   public String getRecoveryEnabled(String serviceName, String componentName);
 
+  /**
+   * Check if auto skip failure is enabled.
+   * @return true if enabled, otherwise false.
+   */
+  public boolean shouldSkipFailure();
 
   /**
    * Get the stack associated with the blueprint.

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
index aeb9a2d..df0187e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
@@ -209,6 +209,20 @@ public class BlueprintImpl implements Blueprint {
   }
 
   @Override
+  public boolean shouldSkipFailure() {
+    if (setting == null) {
+      return false;
+    }
+    Set<HashMap<String, String>> settingValue = setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS);
+    for (Map<String, String> setting : settingValue) {
+      if (setting.containsKey(Setting.SETTING_NAME_SKIP_FAILURE)) {
+        return setting.get(Setting.SETTING_NAME_SKIP_FAILURE).equalsIgnoreCase("true");
+      }
+    }
+    return false;
+  }
+
+  @Override
   public Stack getStack() {
     return stack;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
index 3cdca4d..03fac3e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
@@ -144,7 +144,7 @@ public interface ClusterTopology {
    * @param hostName  host name
    * @return install response
    */
-  RequestStatusResponse installHost(String hostName);
+  RequestStatusResponse installHost(String hostName, boolean skipFailure);
 
   /**
    * Start the specified host.
@@ -152,7 +152,7 @@ public interface ClusterTopology {
    * @param hostName  host name
    * @return start response
    */
-  RequestStatusResponse startHost(String hostName);
+  RequestStatusResponse startHost(String hostName, boolean skipFailure);
 
   void setConfigRecommendationStrategy(ConfigRecommendationStrategy strategy);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index cee9406..699b82a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -224,9 +224,9 @@ public class ClusterTopologyImpl implements ClusterTopology {
   }
 
   @Override
-  public RequestStatusResponse installHost(String hostName) {
+  public RequestStatusResponse installHost(String hostName, boolean skipFailure) {
     try {
-      return ambariContext.installHost(hostName, ambariContext.getClusterName(getClusterId()));
+      return ambariContext.installHost(hostName, ambariContext.getClusterName(getClusterId()), skipFailure);
     } catch (AmbariException e) {
       LOG.error("Cannot get cluster name for clusterId = " + getClusterId(), e);
       throw new RuntimeException(e);
@@ -234,7 +234,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   }
 
   @Override
-  public RequestStatusResponse startHost(String hostName) {
+  public RequestStatusResponse startHost(String hostName, boolean skipFailure) {
     try {
       String hostGroupName = getHostGroupForHost(hostName);
       HostGroup hostGroup = this.blueprint.getHostGroup(hostGroupName);
@@ -244,7 +244,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
       Collection<String> installOnlyComponents =
         hostGroup.getComponentNames(ProvisionAction.INSTALL_ONLY);
 
-      return ambariContext.startHost(hostName, ambariContext.getClusterName(getClusterId()), installOnlyComponents);
+      return ambariContext.startHost(hostName, ambariContext.getClusterName(getClusterId()), installOnlyComponents, skipFailure);
     } catch (AmbariException e) {
       LOG.error("Cannot get cluster name for clusterId = " + getClusterId(), e);
       throw new RuntimeException(e);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
index a9c26cc..4dd6f97 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
@@ -62,6 +62,7 @@ public class HostRequest implements Comparable<HostRequest> {
   private boolean containsMaster;
   private final long id;
   private boolean isOutstanding = true;
+  private final boolean skipFailure;
 
   private Map<TopologyTask, Map<String, Long>> logicalTaskMap = new HashMap<TopologyTask, Map<String, Long>>();
 
@@ -77,7 +78,7 @@ public class HostRequest implements Comparable<HostRequest> {
   private static PredicateCompiler predicateCompiler = new PredicateCompiler();
 
   public HostRequest(long requestId, long id, long clusterId, String hostname, String blueprintName,
-                     HostGroup hostGroup, Predicate predicate, ClusterTopology topology) {
+                     HostGroup hostGroup, Predicate predicate, ClusterTopology topology, boolean skipFailure) {
     this.requestId = requestId;
     this.id = id;
     this.clusterId = clusterId;
@@ -87,8 +88,8 @@ public class HostRequest implements Comparable<HostRequest> {
     this.predicate = predicate;
     containsMaster = hostGroup.containsMasterComponent();
     this.topology = topology;
-
-    createTasks();
+    this.skipFailure = skipFailure;
+    createTasks(this.skipFailure);
     LOG.info("HostRequest: Created request for host: " +
         (hostname == null ? "Host Assignment Pending" : hostname));
   }
@@ -103,7 +104,7 @@ public class HostRequest implements Comparable<HostRequest> {
    * @param entity       host request entity
    */
   public HostRequest(long requestId, long id, String predicate,
-                     ClusterTopology topology, TopologyHostRequestEntity entity) {
+                     ClusterTopology topology, TopologyHostRequestEntity entity, boolean skipFailure) {
 
     this.requestId = requestId;
     this.id = id;
@@ -115,6 +116,7 @@ public class HostRequest implements Comparable<HostRequest> {
     this.predicate = toPredicate(predicate);
     containsMaster = hostGroup.containsMasterComponent();
     this.topology = topology;
+    this.skipFailure = skipFailure;
 
     createTasksForReplay(entity);
 
@@ -172,12 +174,16 @@ public class HostRequest implements Comparable<HostRequest> {
     return ! isOutstanding;
   }
 
-  private void createTasks() {
+  public boolean shouldSkipFailure() {
+    return skipFailure;
+  }
+
+  private void createTasks(boolean skipFailure) {
     // high level topology tasks such as INSTALL, START, ...
     topologyTasks.add(new PersistHostResourcesTask());
     topologyTasks.add(new RegisterWithConfigGroupTask());
 
-    InstallHostTask installTask = new InstallHostTask();
+    InstallHostTask installTask = new InstallHostTask(skipFailure);
     topologyTasks.add(installTask);
     logicalTaskMap.put(installTask, new HashMap<String, Long>());
 
@@ -185,7 +191,7 @@ public class HostRequest implements Comparable<HostRequest> {
 
     StartHostTask startTask = null;
     if (!skipStartTaskCreate) {
-      startTask = new StartHostTask();
+      startTask = new StartHostTask(skipFailure);
       topologyTasks.add(startTask);
       logicalTaskMap.put(startTask, new HashMap<String, Long>());
     } else {
@@ -213,7 +219,7 @@ public class HostRequest implements Comparable<HostRequest> {
         LOG.info("Skipping create of INSTALL task for {} on {} because host is sysprepped.", component, hostName);
       } else {
         HostRoleCommand logicalInstallTask = context.createAmbariTask(
-          getRequestId(), id, component, hostName, AmbariContext.TaskType.INSTALL);
+          getRequestId(), id, component, hostName, AmbariContext.TaskType.INSTALL, skipFailure);
         logicalTasks.put(logicalInstallTask.getTaskId(), logicalInstallTask);
         logicalTaskMap.get(installTask).put(component, logicalInstallTask.getTaskId());
       }
@@ -221,7 +227,7 @@ public class HostRequest implements Comparable<HostRequest> {
       // if component isn't a client, add a start task
       if (!skipStartTaskCreate && stack != null && !stack.getComponentInfo(component).isClient()) {
         HostRoleCommand logicalStartTask = context.createAmbariTask(
-            getRequestId(), id, component, hostName, AmbariContext.TaskType.START);
+            getRequestId(), id, component, hostName, AmbariContext.TaskType.START, skipFailure);
         logicalTasks.put(logicalStartTask.getTaskId(), logicalStartTask);
         logicalTaskMap.get(startTask).put(component, logicalStartTask.getTaskId());
       }
@@ -231,14 +237,14 @@ public class HostRequest implements Comparable<HostRequest> {
   private void createTasksForReplay(TopologyHostRequestEntity entity) {
     topologyTasks.add(new PersistHostResourcesTask());
     topologyTasks.add(new RegisterWithConfigGroupTask());
-    InstallHostTask installTask = new InstallHostTask();
+    InstallHostTask installTask = new InstallHostTask(skipFailure);
     topologyTasks.add(installTask);
     logicalTaskMap.put(installTask, new HashMap<String, Long>());
 
     boolean skipStartTaskCreate = topology.getProvisionAction().equals(INSTALL_ONLY);
 
     if (!skipStartTaskCreate) {
-      StartHostTask startTask = new StartHostTask();
+      StartHostTask startTask = new StartHostTask(skipFailure);
       topologyTasks.add(startTask);
       logicalTaskMap.put(startTask, new HashMap<String, Long>());
     }
@@ -253,7 +259,7 @@ public class HostRequest implements Comparable<HostRequest> {
 
         AmbariContext.TaskType logicalTaskType = getLogicalTaskType(taskType);
         HostRoleCommand task = ambariContext.createAmbariTask(logicalTaskId, getRequestId(), id,
-            component, entity.getHostName(), logicalTaskType);
+            component, entity.getHostName(), logicalTaskType, skipFailure);
 
         logicalTasks.put(logicalTaskId, task);
         Long physicalTaskId = logicalTaskEntity.getPhysicalTaskId();
@@ -482,6 +488,11 @@ public class HostRequest implements Comparable<HostRequest> {
   //todo: extract
   private class InstallHostTask implements TopologyTask {
     private ClusterTopology clusterTopology;
+    private final boolean skipFailure;
+
+    public InstallHostTask(boolean skipFailure) {
+      this.skipFailure = skipFailure;
+    }
 
     @Override
     public Type getType() {
@@ -496,7 +507,7 @@ public class HostRequest implements Comparable<HostRequest> {
     @Override
     public void run() {
       LOG.info("HostRequest.InstallHostTask: Executing INSTALL task for host: " + hostname);
-      RequestStatusResponse response = clusterTopology.installHost(hostname);
+      RequestStatusResponse response = clusterTopology.installHost(hostname, skipFailure);
       // map logical install tasks to physical install tasks
       List<ShortTaskStatus> underlyingTasks = response.getTasks();
       for (ShortTaskStatus task : underlyingTasks) {
@@ -527,6 +538,11 @@ public class HostRequest implements Comparable<HostRequest> {
   //todo: extract
   private class StartHostTask implements TopologyTask {
     private ClusterTopology clusterTopology;
+    private final boolean skipFailure;
+
+    public StartHostTask(boolean skipFailure) {
+      this.skipFailure = skipFailure;
+    }
 
     @Override
     public Type getType() {
@@ -541,7 +557,7 @@ public class HostRequest implements Comparable<HostRequest> {
     @Override
     public void run() {
       LOG.info("HostRequest.StartHostTask: Executing START task for host: " + hostname);
-      RequestStatusResponse response = clusterTopology.startHost(hostname);
+      RequestStatusResponse response = clusterTopology.startHost(hostname, skipFailure);
       // map logical install tasks to physical install tasks
       List<ShortTaskStatus> underlyingTasks = response.getTasks();
       for (ShortTaskStatus task : underlyingTasks) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
index 7ec6088..3aaf589 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
@@ -29,8 +29,6 @@ import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicLong;
 
-import javax.annotation.Nullable;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -40,7 +38,6 @@ import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.ShortTaskStatus;
 import org.apache.ambari.server.orm.dao.HostRoleCommandStatusSummaryDTO;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.orm.entities.TopologyHostGroupEntity;
 import org.apache.ambari.server.orm.entities.TopologyHostInfoEntity;
@@ -51,9 +48,8 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Maps;
+
 
 /**
  * Logical Request implementation used to provision a cluster deployed by Blueprints.
@@ -215,8 +211,9 @@ public class LogicalRequest extends Request {
       //todo: not sure what this byte array is???
       //stage.setClusterHostInfo();
       stage.setClusterId(getClusterId());
-      stage.setSkippable(false);
-      stage.setAutoSkipFailureSupported(false);
+      boolean skipFailure = hostRequest.shouldSkipFailure();
+      stage.setSkippable(skipFailure);
+      stage.setAutoSkipFailureSupported(skipFailure);
       // getTaskEntities() sync's state with physical tasks
       stage.setHostRoleCommands(hostRequest.getTaskEntities());
 
@@ -370,6 +367,7 @@ public class LogicalRequest extends Request {
   private void createHostRequests(TopologyRequest request, ClusterTopology topology) {
     Map<String, HostGroupInfo> hostGroupInfoMap = request.getHostGroupInfo();
     Blueprint blueprint = topology.getBlueprint();
+    boolean skipFailure = topology.getBlueprint().shouldSkipFailure();
     for (HostGroupInfo hostGroupInfo : hostGroupInfoMap.values()) {
       String groupName = hostGroupInfo.getHostGroupName();
       int hostCardinality = hostGroupInfo.getRequestedHostCount();
@@ -380,14 +378,14 @@ public class LogicalRequest extends Request {
           // host names are specified
           String hostname = hostnames.get(i);
           HostRequest hostRequest = new HostRequest(getRequestId(), hostIdCounter.getAndIncrement(), getClusterId(),
-              hostname, blueprint.getName(), blueprint.getHostGroup(groupName), null, topology);
+              hostname, blueprint.getName(), blueprint.getHostGroup(groupName), null, topology, skipFailure);
           synchronized (requestsWithReservedHosts) {
             requestsWithReservedHosts.put(hostname, hostRequest);
           }
         } else {
           // host count is specified
           HostRequest hostRequest = new HostRequest(getRequestId(), hostIdCounter.getAndIncrement(), getClusterId(),
-              null, blueprint.getName(), blueprint.getHostGroup(groupName), hostGroupInfo.getPredicate(), topology);
+              null, blueprint.getName(), blueprint.getHostGroup(groupName), hostGroupInfo.getPredicate(), topology, skipFailure);
           outstandingHostRequests.add(hostRequest);
         }
       }
@@ -420,7 +418,7 @@ public class LogicalRequest extends Request {
       }
     }
 
-
+    boolean skipFailure = topology.getBlueprint().shouldSkipFailure();
     for (TopologyHostRequestEntity hostRequestEntity : requestEntity.getTopologyHostRequestEntities()) {
       Long hostRequestId = hostRequestEntity.getId();
       synchronized (hostIdCounter) {
@@ -437,7 +435,7 @@ public class LogicalRequest extends Request {
 
       //todo: move predicate processing to host request
       HostRequest hostRequest = new HostRequest(getRequestId(), hostRequestId,
-          reservedHostName, topology, hostRequestEntity);
+          reservedHostName, topology, hostRequestEntity, skipFailure);
 
       allHostRequests.add(hostRequest);
       if (! hostRequest.isCompleted()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
index c03d833..601cbfd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
@@ -36,8 +36,12 @@ public class Setting {
 
   public static final String SETTING_NAME_COMPONENT_SETTINGS = "component_settings";
 
+  public static final String SETTING_NAME_DEPLOYMENT_SETTINGS = "deployment_settings";
+
   public static final String SETTING_NAME_RECOVERY_ENABLED = "recovery_enabled";
 
+  public static final String SETTING_NAME_SKIP_FAILURE = "skip_failure";
+
   public static final String SETTING_NAME_NAME = "name";
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index 4444714..d06aa1e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.controller.internal;
 
 
 import static org.apache.ambari.server.controller.internal.HostComponentResourceProvider.HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID;
+import org.apache.ambari.server.topology.Blueprint;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.expect;
@@ -1548,6 +1549,7 @@ public class RequestResourceProviderTest {
 
 
     ClusterTopology topology = createNiceMock(ClusterTopology.class);
+    Blueprint blueprint = createNiceMock(Blueprint.class);
     expect(topology.getClusterId()).andReturn(2L).anyTimes();
 
     Long clusterId = 2L;
@@ -1566,7 +1568,8 @@ public class RequestResourceProviderTest {
 
     TopologyRequest topologyRequest = createNiceMock(TopologyRequest.class);
     expect(topologyRequest.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
-    expect(topologyRequest.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes();
 
 
 
@@ -1576,6 +1579,7 @@ public class RequestResourceProviderTest {
     PowerMock.replayAll(
       topologyRequest,
       topology,
+      blueprint,
       managementController,
       clusters);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
index 0b06eb8..0608697 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
@@ -21,10 +21,12 @@ package org.apache.ambari.server.topology;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.orm.entities.BlueprintEntity;
 import org.apache.ambari.server.state.SecurityType;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -32,98 +34,85 @@ import java.util.Set;
 
 import static org.easymock.EasyMock.*;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertFalse;
+
 
 /**
  * Blueprint unit tests.
  */
 public class BlueprintImplTest {
-
-  private static final Map<String, Map<String, Map<String, String>>> EMPTY_ATTRIBUTES =
-      new HashMap<String, Map<String, Map<String, String>>>();
-
-  private static final Map<String, Map<String, String>> EMPTY_PROPERTIES =
-      new HashMap<String, Map<String, String>>();
-
+  private static final Map<String, Map<String, Map<String, String>>> EMPTY_ATTRIBUTES = new HashMap<>();
+  private static final Map<String, Map<String, String>> EMPTY_PROPERTIES = new HashMap<>();
   private static final Configuration EMPTY_CONFIGURATION = new Configuration(EMPTY_PROPERTIES, EMPTY_ATTRIBUTES);
 
+  Stack stack = createNiceMock(Stack.class);
+  Setting setting = createNiceMock(Setting.class);
+  HostGroup group1 = createMock(HostGroup.class);
+  HostGroup group2 = createMock(HostGroup.class);
+  Set<HostGroup> hostGroups = new HashSet<>();
+  Set<String> group1Components = new HashSet<>();
+  Set<String> group2Components = new HashSet<>();
+  Map<String, Map<String, String>> properties = new HashMap<>();
+  Map<String, String> hdfsProps = new HashMap<>();
+  Configuration configuration = new Configuration(properties, EMPTY_ATTRIBUTES, EMPTY_CONFIGURATION);
+
+  @Before
+  public void setup() {
+    properties.put("hdfs-site", hdfsProps);
+    hdfsProps.put("foo", "val");
+    hdfsProps.put("bar", "val");
+    Map<String, String> category1Props = new HashMap<>();
+    properties.put("category1", category1Props);
+    category1Props.put("prop1", "val");
 
-
-  @Test
-  public void testValidateConfigurations__basic_positive() throws Exception {
-
-    Stack stack = createNiceMock(Stack.class);
-
-    HostGroup group1 = createMock(HostGroup.class);
-    HostGroup group2 = createMock(HostGroup.class);
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
-
-    Collection<String> group1Components = new HashSet<String>();
     group1Components.add("c1");
     group1Components.add("c2");
 
-    Set<String> group2Components = new HashSet<String>();
     group2Components.add("c1");
     group2Components.add("c3");
 
-    Collection<Stack.ConfigProperty> requiredHDFSProperties = new HashSet<Stack.ConfigProperty>();
+    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "foo")).andReturn(false).anyTimes();
+    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "bar")).andReturn(false).anyTimes();
+    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "some_password")).andReturn(true).anyTimes();
+    expect(stack.isPasswordProperty("HDFS", "category1", "prop1")).andReturn(false).anyTimes();
+    expect(stack.isPasswordProperty("SERVICE2", "category2", "prop2")).andReturn(false).anyTimes();
+    expect(stack.getServiceForComponent("c1")).andReturn("HDFS").anyTimes();
+    expect(stack.getServiceForComponent("c2")).andReturn("HDFS").anyTimes();
+    expect(stack.getServiceForComponent("c3")).andReturn("SERVICE2").anyTimes();
+    expect(group1.getName()).andReturn("group1").anyTimes();
+    expect(group2.getName()).andReturn("group2").anyTimes();
+    expect(group1.getConfiguration()).andReturn(EMPTY_CONFIGURATION).anyTimes();
+    expect(group1.getComponentNames()).andReturn(group1Components).anyTimes();
+    expect(group2.getComponentNames()).andReturn(group2Components).anyTimes();
+
+    Collection<Stack.ConfigProperty> requiredHDFSProperties = new HashSet<>();
     requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "foo", null));
     requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "bar", null));
     requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "some_password", null));
-
     requiredHDFSProperties.add(new Stack.ConfigProperty("category1", "prop1", null));
 
     Collection<Stack.ConfigProperty> requiredService2Properties = new HashSet<Stack.ConfigProperty>();
     requiredService2Properties.add(new Stack.ConfigProperty("category2", "prop2", null));
+    expect(stack.getRequiredConfigurationProperties("HDFS")).andReturn(requiredHDFSProperties).anyTimes();
+    expect(stack.getRequiredConfigurationProperties("SERVICE2")).andReturn(requiredService2Properties).anyTimes();
+  }
 
-    expect(stack.getServiceForComponent("c1")).andReturn("HDFS").atLeastOnce();
-    expect(stack.getServiceForComponent("c2")).andReturn("HDFS").atLeastOnce();
-    expect(stack.getServiceForComponent("c3")).andReturn("SERVICE2").atLeastOnce();
-
-    expect(stack.getRequiredConfigurationProperties("HDFS")).andReturn(requiredHDFSProperties).atLeastOnce();
-    expect(stack.getRequiredConfigurationProperties("SERVICE2")).andReturn(requiredService2Properties).atLeastOnce();
-
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "foo")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "bar")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "some_password")).andReturn(true).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "category1", "prop1")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("SERVICE2", "category2", "prop2")).andReturn(false).atLeastOnce();
-
-    expect(group1.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
-    expect(group1.getName()).andReturn("group1").anyTimes();
-    expect(group1.getComponentNames()).andReturn(group1Components).atLeastOnce();
+  @Test
+  public void testValidateConfigurations__basic_positive() throws Exception {
     expect(group1.getCardinality()).andReturn("1").atLeastOnce();
     expect(group1.getComponents()).andReturn(Arrays.asList(new Component("c1"), new Component("c2"))).atLeastOnce();
-
-    expect(group2.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
-    expect(group2.getName()).andReturn("group2").anyTimes();
-    expect(group2.getComponentNames()).andReturn(group2Components).atLeastOnce();
     expect(group2.getCardinality()).andReturn("1").atLeastOnce();
     expect(group2.getComponents()).andReturn(Arrays.asList(new Component("c1"), new Component("c3"))).atLeastOnce();
+    expect(group2.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
 
     replay(stack, group1, group2);
 
-    // Blueprint config
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> hdfsProps = new HashMap<String, String>();
-    properties.put("hdfs-site", hdfsProps);
-    hdfsProps.put("foo", "val");
-    hdfsProps.put("bar", "val");
-
-    Map<String, String> category1Props = new HashMap<String, String>();
-    properties.put("category1", category1Props);
-    category1Props.put("prop1", "val");
-
-    Map<String, String> category2Props = new HashMap<String, String>();
+    Map<String, String> category2Props = new HashMap<>();
     properties.put("category2", category2Props);
     category2Props.put("prop2", "val");
 
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String, Map<String, Map<String, String>>>();
-    // for this basic test not ensuring that stack properties are ignored, this is tested in another test
-    Configuration configuration = new Configuration(properties, attributes, EMPTY_CONFIGURATION);
-
     SecurityConfiguration securityConfiguration = new SecurityConfiguration(SecurityType.KERBEROS, "testRef", null);
     Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, securityConfiguration);
     blueprint.validateRequiredProperties();
@@ -134,158 +123,31 @@ public class BlueprintImplTest {
     assertTrue(entity.getSecurityDescriptorReference().equals("testRef"));
   }
 
-  @Test
+  @Test(expected = InvalidTopologyException.class)
   public void testValidateConfigurations__basic_negative() throws Exception {
-
-    Stack stack = createNiceMock(Stack.class);
-
-    HostGroup group1 = createNiceMock(HostGroup.class);
-    HostGroup group2 = createNiceMock(HostGroup.class);
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
-    hostGroups.add(group1);
-    hostGroups.add(group2);
-
-    Collection<String> group1Components = new HashSet<String>();
-    group1Components.add("c1");
-    group1Components.add("c2");
-
-    Collection<String> group2Components = new HashSet<String>();
-    group2Components.add("c1");
-    group2Components.add("c3");
-
-    Collection<Stack.ConfigProperty> requiredHDFSProperties = new HashSet<Stack.ConfigProperty>();
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "foo", null));
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "bar", null));
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "some_password", null));
-
-    requiredHDFSProperties.add(new Stack.ConfigProperty("category1", "prop1", null));
-
-    Collection<Stack.ConfigProperty> requiredService2Properties = new HashSet<Stack.ConfigProperty>();
-    requiredService2Properties.add(new Stack.ConfigProperty("category2", "prop2", null));
-
-    expect(stack.getServiceForComponent("c1")).andReturn("HDFS").atLeastOnce();
-    expect(stack.getServiceForComponent("c2")).andReturn("HDFS").atLeastOnce();
-    expect(stack.getServiceForComponent("c3")).andReturn("SERVICE2").atLeastOnce();
-
-    expect(stack.getRequiredConfigurationProperties("HDFS")).andReturn(requiredHDFSProperties).atLeastOnce();
-    expect(stack.getRequiredConfigurationProperties("SERVICE2")).andReturn(requiredService2Properties).atLeastOnce();
-
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "foo")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "bar")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "some_password")).andReturn(true).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "category1", "prop1")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("SERVICE2", "category2", "prop2")).andReturn(false).atLeastOnce();
-
-    expect(group1.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
-    expect(group1.getName()).andReturn("group1").anyTimes();
-    expect(group1.getComponentNames()).andReturn(group1Components).atLeastOnce();
-
     expect(group2.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
-    expect(group2.getName()).andReturn("group2").anyTimes();
-    expect(group2.getComponentNames()).andReturn(group2Components).atLeastOnce();
-
     replay(stack, group1, group2);
 
-    // Blueprint config
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> hdfsProps = new HashMap<String, String>();
-    properties.put("hdfs-site", hdfsProps);
-    hdfsProps.put("foo", "val");
-    hdfsProps.put("bar", "val");
-    Map<String, String> category1Props = new HashMap<String, String>();
-    properties.put("category1", category1Props);
-    category1Props.put("prop1", "val");
-
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String, Map<String, Map<String, String>>>();
-    // for this basic test not ensuring that stack properties are ignored, this is tested in another test
-    Configuration configuration = new Configuration(properties, attributes, EMPTY_CONFIGURATION);
-
     Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, null);
-    try {
-      blueprint.validateRequiredProperties();
-      fail("Expected exception to be thrown for missing config property");
-    } catch (InvalidTopologyException e) {
-      System.out.println("****" + e.getMessage() + "***");
-    }
-
+    blueprint.validateRequiredProperties();
     verify(stack, group1, group2);
   }
 
   @Test
   public void testValidateConfigurations__hostGroupConfig() throws Exception {
-
-    Stack stack = createNiceMock(Stack.class);
-
-    HostGroup group1 = createMock(HostGroup.class);
-    HostGroup group2 = createMock(HostGroup.class);
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
-    hostGroups.add(group1);
-    hostGroups.add(group2);
-
-    Set<String> group1Components = new HashSet<String>();
-    group1Components.add("c1");
-    group1Components.add("c2");
-
-    Set<String> group2Components = new HashSet<String>();
-    group2Components.add("c1");
-    group2Components.add("c3");
-
-    Map<String, Map<String, String>> group2Props = new HashMap<String, Map<String, String>>();
-    Map<String, String> group2Category2Props = new HashMap<String, String>();
+    Map<String, Map<String, String>> group2Props = new HashMap<>();
+    Map<String, String> group2Category2Props = new HashMap<>();
     group2Props.put("category2", group2Category2Props);
     group2Category2Props.put("prop2", "val");
+    // set config for group2 which contains a required property
+    Configuration group2Configuration = new Configuration(group2Props, EMPTY_ATTRIBUTES, configuration);
+    expect(group2.getConfiguration()).andReturn(group2Configuration).atLeastOnce();
 
-    Collection<Stack.ConfigProperty> requiredHDFSProperties = new HashSet<Stack.ConfigProperty>();
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "foo", null));
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "bar", null));
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "some_password", null));
-
-    requiredHDFSProperties.add(new Stack.ConfigProperty("category1", "prop1", null));
-
-    Collection<Stack.ConfigProperty> requiredService2Properties = new HashSet<Stack.ConfigProperty>();
-    requiredService2Properties.add(new Stack.ConfigProperty("category2", "prop2", null));
-
-    expect(stack.getServiceForComponent("c1")).andReturn("HDFS").atLeastOnce();
-    expect(stack.getServiceForComponent("c2")).andReturn("HDFS").atLeastOnce();
-    expect(stack.getServiceForComponent("c3")).andReturn("SERVICE2").atLeastOnce();
-
-    expect(stack.getRequiredConfigurationProperties("HDFS")).andReturn(requiredHDFSProperties).atLeastOnce();
-    expect(stack.getRequiredConfigurationProperties("SERVICE2")).andReturn(requiredService2Properties).atLeastOnce();
-
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "foo")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "bar")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "hdfs-site", "some_password")).andReturn(true).atLeastOnce();
-    expect(stack.isPasswordProperty("HDFS", "category1", "prop1")).andReturn(false).atLeastOnce();
-    expect(stack.isPasswordProperty("SERVICE2", "category2", "prop2")).andReturn(false).atLeastOnce();
-
-    expect(group1.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
-    expect(group1.getName()).andReturn("group1").anyTimes();
-    expect(group1.getComponentNames()).andReturn(group1Components).atLeastOnce();
     expect(group1.getCardinality()).andReturn("1").atLeastOnce();
     expect(group1.getComponents()).andReturn(Arrays.asList(new Component("c1"), new Component("c2"))).atLeastOnce();
-
-    expect(group2.getName()).andReturn("group2").anyTimes();
-    expect(group2.getComponentNames()).andReturn(group2Components).atLeastOnce();
     expect(group2.getCardinality()).andReturn("1").atLeastOnce();
     expect(group2.getComponents()).andReturn(Arrays.asList(new Component("c1"), new Component("c3"))).atLeastOnce();
 
-    // Blueprint config
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> hdfsProps = new HashMap<String, String>();
-    properties.put("hdfs-site", hdfsProps);
-    hdfsProps.put("foo", "val");
-    hdfsProps.put("bar", "val");
-
-    Map<String, String> category1Props = new HashMap<String, String>();
-    properties.put("category1", category1Props);
-    category1Props.put("prop1", "val");
-
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String, Map<String, Map<String, String>>>();
-    Configuration configuration = new Configuration(properties, attributes, EMPTY_CONFIGURATION);
-    // set config for group2 which contains a required property
-    Configuration group2Configuration = new Configuration(group2Props, EMPTY_ATTRIBUTES, configuration);
-    expect(group2.getConfiguration()).andReturn(group2Configuration).atLeastOnce();
-
     replay(stack, group1, group2);
 
     Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, null);
@@ -297,66 +159,42 @@ public class BlueprintImplTest {
     assertTrue(entity.getSecurityDescriptorReference() == null);
   }
 
-  @Test
-  public void testValidateConfigurations__secretReference(){
-    Stack stack = createNiceMock(Stack.class);
-
-    HostGroup group1 = createNiceMock(HostGroup.class);
-    HostGroup group2 = createNiceMock(HostGroup.class);
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
-    hostGroups.add(group1);
-    hostGroups.add(group2);
-
-    Set<String> group1Components = new HashSet<String>();
-    group1Components.add("c1");
-    group1Components.add("c2");
-
-    Set<String> group2Components = new HashSet<String>();
-    group2Components.add("c1");
-    group2Components.add("c3");
-
-    Map<String, Map<String, String>> group2Props = new HashMap<String, Map<String, String>>();
-    Map<String, String> group2Category2Props = new HashMap<String, String>();
+  @Test(expected = InvalidTopologyException.class)
+  public void testValidateConfigurations__secretReference() throws InvalidTopologyException {
+    Map<String, Map<String, String>> group2Props = new HashMap<>();
+    Map<String, String> group2Category2Props = new HashMap<>();
     group2Props.put("category2", group2Category2Props);
     group2Category2Props.put("prop2", "val");
-
-    Collection<Stack.ConfigProperty> requiredHDFSProperties = new HashSet<Stack.ConfigProperty>();
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "foo", null));
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "bar", null));
-    requiredHDFSProperties.add(new Stack.ConfigProperty("hdfs-site", "some_password", null));
-
-    requiredHDFSProperties.add(new Stack.ConfigProperty("category1", "prop1", null));
-
-    Collection<Stack.ConfigProperty> requiredService2Properties = new HashSet<Stack.ConfigProperty>();
-    requiredService2Properties.add(new Stack.ConfigProperty("category2", "prop2", null));
-
-
-    // Blueprint config
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> hdfsProps = new HashMap<String, String>();
-    properties.put("hdfs-site", hdfsProps);
-    hdfsProps.put("foo", "val");
-    hdfsProps.put("bar", "val");
     hdfsProps.put("secret", "SECRET:hdfs-site:1:test");
-
-    Map<String, String> category1Props = new HashMap<String, String>();
-    properties.put("category1", category1Props);
-    category1Props.put("prop1", "val");
-
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String, Map<String, Map<String, String>>>();
-    Configuration configuration = new Configuration(properties, attributes, EMPTY_CONFIGURATION);
-    // set config for group2 which contains a required property
-
     replay(stack, group1, group2);
 
     Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, null);
-    try {
-      blueprint.validateRequiredProperties();
-      fail("Expected exception to be thrown for using secret reference");
-    } catch (InvalidTopologyException e) {
-      System.out.println("****" + e.getMessage() + "***");
-    }
+    blueprint.validateRequiredProperties();
+    verify(stack, group1, group2);
+  }
 
+  @Test
+  public void testAutoSkipFailureEnabled() {
+    Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, null, setting);
+    HashMap<String, String> skipFailureSetting = new HashMap<>();
+    skipFailureSetting.put(Setting.SETTING_NAME_SKIP_FAILURE, "true");
+    expect(setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS)).andReturn(Collections.singleton(skipFailureSetting));
+    replay(stack, setting);
+
+    assertTrue(blueprint.shouldSkipFailure());
+    verify(stack, setting);
+  }
+
+  @Test
+  public void testAutoSkipFailureDisabled() {
+    Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, null, setting);
+    HashMap<String, String> skipFailureSetting = new HashMap<>();
+    skipFailureSetting.put(Setting.SETTING_NAME_SKIP_FAILURE, "false");
+    expect(setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS)).andReturn(Collections.singleton(skipFailureSetting));
+    replay(stack, setting);
+
+    assertFalse(blueprint.shouldSkipFailure());
+    verify(stack, setting);
   }
 
   //todo: ensure coverage for these existing tests

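Note on the two new tests above: they exercise Blueprint.shouldSkipFailure() purely through a stubbed Setting, so the production method does not appear in this hunk. A minimal sketch of how BlueprintImpl could evaluate the deployment settings, inferred only from the test expectations (the setting field name and the Set return type of getSettingValue are assumptions, not code taken from the commit):

    // Hypothetical BlueprintImpl method, inferred from the tests above:
    // scan the blueprint's deployment settings for a true-valued
    // skip-failure entry; a missing settings block defaults to "do not skip".
    public boolean shouldSkipFailure() {
      if (setting == null) {
        return false;
      }
      Set<HashMap<String, String>> deploymentSettings =
          setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS);
      if (deploymentSettings == null) {
        return false;
      }
      for (Map<String, String> entry : deploymentSettings) {
        if (Boolean.parseBoolean(entry.get(Setting.SETTING_NAME_SKIP_FAILURE))) {
          return true;
        }
      }
      return false;
    }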
http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithHostsSyspreppedTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithHostsSyspreppedTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithHostsSyspreppedTest.java
index d49c21d..8c308a7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithHostsSyspreppedTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithHostsSyspreppedTest.java
@@ -64,6 +64,7 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
 import static org.apache.ambari.server.controller.internal.ProvisionAction.INSTALL_ONLY;
+import static org.easymock.EasyMock.anyBoolean;
 import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
@@ -347,9 +348,9 @@ public class ClusterDeployWithHostsSyspreppedTest {
 
 
     expect(ambariContext.createAmbariTask(anyLong(), anyLong(), eq("component3"),
-      anyString(), eq(AmbariContext.TaskType.INSTALL))).andReturn(hostRoleCommandInstallComponent3).times(3);
+      anyString(), eq(AmbariContext.TaskType.INSTALL), anyBoolean())).andReturn(hostRoleCommandInstallComponent3).times(3);
     expect(ambariContext.createAmbariTask(anyLong(), anyLong(), eq("component4"),
-      anyString(), eq(AmbariContext.TaskType.INSTALL))).andReturn(hostRoleCommandInstallComponent4).times(2);
+      anyString(), eq(AmbariContext.TaskType.INSTALL), anyBoolean())).andReturn(hostRoleCommandInstallComponent4).times(2);
 
     expect(hostRoleCommandInstallComponent3.getTaskId()).andReturn(1L).atLeastOnce();
     expect(hostRoleCommandInstallComponent3.getRoleCommand()).andReturn(RoleCommand.INSTALL).atLeastOnce();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
index ad441e4..78d5538 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
@@ -65,6 +65,7 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
 import static org.apache.ambari.server.controller.internal.ProvisionAction.INSTALL_ONLY;
+import static org.easymock.EasyMock.anyBoolean;
 import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
@@ -343,7 +344,7 @@ public class ClusterInstallWithoutStartTest {
       andReturn(Collections.singletonList(configurationRequest3)).once();
     // INSTALL task expectation
     expect(ambariContext.createAmbariTask(anyLong(), anyLong(), anyString(),
-      anyString(), eq(AmbariContext.TaskType.INSTALL))).andReturn(hostRoleCommand).atLeastOnce();
+      anyString(), eq(AmbariContext.TaskType.INSTALL), anyBoolean())).andReturn(hostRoleCommand).atLeastOnce();
     expect(hostRoleCommand.getTaskId()).andReturn(1L).atLeastOnce();
     expect(hostRoleCommand.getRoleCommand()).andReturn(RoleCommand.INSTALL).atLeastOnce();
     expect(hostRoleCommand.getRole()).andReturn(Role.INSTALL_PACKAGES).atLeastOnce();

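Both test files above widen their EasyMock expectations because AmbariContext.createAmbariTask(...) now takes an additional boolean indicating whether the generated task may be auto-skipped on failure. A hedged sketch of a call site that threads the flag through, matching the argument order the expectations imply (the local variable names and surrounding caller are illustrative assumptions, not code from the commit):

    // Hypothetical call site: the skip-failure flag is computed once from the
    // blueprint settings and passed to each INSTALL task that is created.
    boolean skipFailure = topology.getBlueprint().shouldSkipFailure();
    HostRoleCommand installTask = ambariContext.createAmbariTask(
        requestId, stageId, "component3", hostName,
        AmbariContext.TaskType.INSTALL, skipFailure);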
http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
index e979173..31bb717 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
@@ -113,6 +113,7 @@ public class LogicalRequestTest extends EasyMockSupport {
     expect(clusterTopology.getProvisionAction()).andReturn(ProvisionAction.INSTALL_ONLY).anyTimes();
     expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.getName()).andReturn("blueprintDef").anyTimes();
+    expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes();
 
     PowerMock.reset(AmbariServer.class);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e6b2f1eb/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java
index 5615d12..39d5609 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java
@@ -39,26 +39,32 @@ public class SettingTest {
     Map<String, Set<HashMap<String, String>>> properties = new HashMap<>();
     Set<HashMap<String, String>> setting1 = new HashSet<>();
     Set<HashMap<String, String>> setting2 = new HashSet<>();
+    Set<HashMap<String, String>> setting3 = new HashSet<>();
 
     // Setting 1: Property1
-    HashMap<String, String> setting1Properties1 = new HashMap<String, String>();
+    HashMap<String, String> setting1Properties1 = new HashMap<>();
     setting1Properties1.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "true");
     setting1.add(setting1Properties1);
 
     // Setting 2: Property1 and Property2
-    HashMap<String, String> setting2Properties1 = new HashMap<String, String>();
+    HashMap<String, String> setting2Properties1 = new HashMap<>();
     setting2Properties1.put(Setting.SETTING_NAME_NAME, "HDFS");
     setting2Properties1.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false");
 
-    HashMap<String, String> setting2Properties2 = new HashMap<String, String>();
+    HashMap<String, String> setting2Properties2 = new HashMap<>();
     setting2Properties2.put(Setting.SETTING_NAME_NAME, "TEZ");
     setting2Properties2.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false");
 
     setting2.add(setting2Properties1);
     setting2.add(setting2Properties2);
 
+    HashMap<String, String> setting3Properties1 = new HashMap<>();
+    setting3Properties1.put(Setting.SETTING_NAME_SKIP_FAILURE, "true");
+    setting3.add(setting3Properties1);
+
     properties.put(Setting.SETTING_NAME_RECOVERY_SETTINGS, setting1);
     properties.put(Setting.SETTING_NAME_SERVICE_SETTINGS, setting2);
+    properties.put(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS, setting3);
 
     Setting setting = new Setting(properties);
     assertEquals(properties, setting.getProperties());

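The fixture above registers the new deployment settings group alongside the existing recovery and service settings. A short usage sketch, assuming getSettingValue(name) simply returns the set of property maps stored under that name (consistent with how BlueprintImplTest stubs it, but an assumption about the accessor all the same):

    // Usage sketch for the fixture above (accessor behaviour assumed).
    Setting setting = new Setting(properties);
    Set<HashMap<String, String>> deploymentSettings =
        setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS);
    assertEquals(setting3, deploymentSettings);  // the single skip-failure entry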
