incubator-ambari-commits mailing list archives

From tbeerbo...@apache.org
Subject [3/4] AMBARI-3577 - Move service related code in AmbariManagementController to ServiceResourceProvider
Date Tue, 22 Oct 2013 18:50:13 GMT
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/1aad6407/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 7bd69ac..e0b9989 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -18,16 +18,36 @@
 package org.apache.ambari.server.controller.internal;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ClusterNotFoundException;
+import org.apache.ambari.server.DuplicateResourceException;
+import org.apache.ambari.server.ObjectNotFoundException;
+import org.apache.ambari.server.ParentObjectNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.ServiceRequest;
 import org.apache.ambari.server.controller.ServiceResponse;
 import org.apache.ambari.server.controller.spi.*;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.commons.lang.StringUtils;
+
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -88,7 +108,7 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     createResources(new Command<Void>() {
       @Override
       public Void invoke() throws AmbariException {
-        getManagementController().createServices(requests);
+        createServices(requests);
         return null;
       }
     });
@@ -110,7 +130,7 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     Set<ServiceResponse> responses = getResources(new Command<Set<ServiceResponse>>() {
       @Override
       public Set<ServiceResponse> invoke() throws AmbariException {
-        return getManagementController().getServices(requests);
+        return getServices(requests);
       }
     });
 
@@ -146,16 +166,16 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
       }
 
       final boolean runSmokeTest = "true".equals(getQueryParameterValue(
-        QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate)) ? true : false;
+          QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate));
 
-      final boolean reconfigureClients = "false".equals(getQueryParameterValue(
-        QUERY_PARAMETERS_RECONFIGURE_CLIENT, predicate)) ? false : true;
+      final boolean reconfigureClients = !"false".equals(getQueryParameterValue(
+          QUERY_PARAMETERS_RECONFIGURE_CLIENT, predicate));
 
       response = modifyResources(new Command<RequestStatusResponse>() {
         @Override
         public RequestStatusResponse invoke() throws AmbariException {
-          return getManagementController().updateServices(requests,
-            request.getRequestInfoProperties(), runSmokeTest, reconfigureClients);
+          return updateServices(requests,
+              request.getRequestInfoProperties(), runSmokeTest, reconfigureClients);
         }
       });
     }
@@ -175,7 +195,7 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
       @Override
       public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().deleteServices(requests);
+        return deleteServices(requests);
       }
     });
 
@@ -204,13 +224,15 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
   }
 
 
-// ----- utility methods -------------------------------------------------
+  // ----- AbstractResourceProvider ----------------------------------------
 
   @Override
   protected Set<String> getPKPropertyIds() {
     return pkPropertyIds;
   }
 
+
+  // ----- utility methods -------------------------------------------------
   /**
    * Get a service request object from a map of property values.
    *
@@ -233,4 +255,508 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     }
     return svcRequest;
   }
+
+  // Create services from the given requests.
+  protected synchronized void createServices(Set<ServiceRequest> requests)
+      throws AmbariException {
+
+    if (requests.isEmpty()) {
+      LOG.warn("Received an empty request set");
+      return;
+    }
+
+    Clusters       clusters       = getManagementController().getClusters();
+    AmbariMetaInfo ambariMetaInfo = getManagementController().getAmbariMetaInfo();
+
+    // do all validation checks
+    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
+    Set<String> duplicates = new HashSet<String>();
+    for (ServiceRequest request : requests) {
+      if (request.getClusterName() == null
+          || request.getClusterName().isEmpty()
+          || request.getServiceName() == null
+          || request.getServiceName().isEmpty()) {
+        throw new IllegalArgumentException("Cluster name and service name"
+            + " should be provided when creating a service");
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Received a createService request"
+            + ", clusterName=" + request.getClusterName()
+            + ", serviceName=" + request.getServiceName()
+            + ", request=" + request);
+      }
+
+      if (!serviceNames.containsKey(request.getClusterName())) {
+        serviceNames.put(request.getClusterName(), new HashSet<String>());
+      }
+      if (serviceNames.get(request.getClusterName())
+          .contains(request.getServiceName())) {
+        // throw error later for dup
+        duplicates.add(request.getServiceName());
+        continue;
+      }
+      serviceNames.get(request.getClusterName()).add(request.getServiceName());
+
+      if (request.getDesiredState() != null
+          && !request.getDesiredState().isEmpty()) {
+        State state = State.valueOf(request.getDesiredState());
+        if (!state.isValidDesiredState()
+            || state != State.INIT) {
+          throw new IllegalArgumentException("Invalid desired state;"
+              + " only INIT state allowed during creation"
+              + ", providedDesiredState=" + request.getDesiredState());
+        }
+      }
+
+      Cluster cluster;
+      try {
+        cluster = clusters.getCluster(request.getClusterName());
+      } catch (ClusterNotFoundException e) {
+        throw new ParentObjectNotFoundException("Attempted to add a service to a cluster which doesn't exist", e);
+      }
+      try {
+        Service s = cluster.getService(request.getServiceName());
+        if (s != null) {
+          // throw error later for dup
+          duplicates.add(request.getServiceName());
+          continue;
+        }
+      } catch (ServiceNotFoundException e) {
+        // Expected
+      }
+
+      StackId stackId = cluster.getDesiredStackVersion();
+      if (!ambariMetaInfo.isValidService(stackId.getStackName(),
+          stackId.getStackVersion(), request.getServiceName())) {
+        throw new IllegalArgumentException("Unsupported or invalid service"
+            + " in stack"
+            + ", clusterName=" + request.getClusterName()
+            + ", serviceName=" + request.getServiceName()
+            + ", stackInfo=" + stackId.getStackId());
+      }
+    }
+
+    // ensure only a single cluster update
+    if (serviceNames.size() != 1) {
+      throw new IllegalArgumentException("Invalid arguments, updates allowed"
+          + "on only one cluster at a time");
+    }
+
+    // Validate dups
+    if (!duplicates.isEmpty()) {
+      StringBuilder svcNames = new StringBuilder();
+      boolean first = true;
+      for (String svcName : duplicates) {
+        if (!first) {
+          svcNames.append(",");
+        }
+        first = false;
+        svcNames.append(svcName);
+      }
+      String clusterName = requests.iterator().next().getClusterName();
+      String msg;
+      if (duplicates.size() == 1) {
+        msg = "Attempted to create a service which already exists: "
+            + ", clusterName=" + clusterName  + " serviceName=" + svcNames.toString();
+      } else {
+        msg = "Attempted to create services which already exist: "
+            + ", clusterName=" + clusterName  + " serviceNames=" + svcNames.toString();
+      }
+      throw new DuplicateResourceException(msg);
+    }
+
+    ServiceFactory serviceFactory = getManagementController().getServiceFactory();
+
+    // now to the real work
+    for (ServiceRequest request : requests) {
+      Cluster cluster = clusters.getCluster(request.getClusterName());
+
+      // FIXME initialize configs based off service.configVersions
+      Map<String, Config> configs = new HashMap<String, Config>();
+
+      State state = State.INIT;
+
+      // Already checked that service does not exist
+      Service s = serviceFactory.createNew(cluster, request.getServiceName());
+
+      s.setDesiredState(state);
+      s.updateDesiredConfigs(configs);
+      s.setDesiredStackVersion(cluster.getDesiredStackVersion());
+      cluster.addService(s);
+      s.persist();
+    }
+  }
+
+  // Get services from the given set of requests.
+  protected Set<ServiceResponse> getServices(Set<ServiceRequest> requests)
+      throws AmbariException {
+    Set<ServiceResponse> response = new HashSet<ServiceResponse>();
+    for (ServiceRequest request : requests) {
+      try {
+        response.addAll(getServices(request));
+      } catch (ServiceNotFoundException e) {
+        if (requests.size() == 1) {
+          // only throw exception if 1 request.
+          // there will be > 1 request in case of OR predicate
+          throw e;
+        }
+      }
+    }
+    return response;
+  }
+
+  // Get services from the given request.
+  private synchronized Set<ServiceResponse> getServices(ServiceRequest request)
+      throws AmbariException {
+    if (request.getClusterName() == null
+        || request.getClusterName().isEmpty()) {
+      throw new AmbariException("Invalid arguments, cluster name"
+          + " cannot be null");
+    }
+    Clusters clusters    = getManagementController().getClusters();
+    String   clusterName = request.getClusterName();
+
+    final Cluster cluster;
+    try {
+      cluster = clusters.getCluster(clusterName);
+    } catch (ObjectNotFoundException e) {
+      throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
+    }
+
+    Set<ServiceResponse> response = new HashSet<ServiceResponse>();
+    if (request.getServiceName() != null) {
+      Service s = cluster.getService(request.getServiceName());
+      response.add(s.convertToResponse());
+      return response;
+    }
+
+    // TODO support search on predicates?
+
+    boolean checkDesiredState = false;
+    State desiredStateToCheck = null;
+    if (request.getDesiredState() != null
+        && !request.getDesiredState().isEmpty()) {
+      desiredStateToCheck = State.valueOf(request.getDesiredState());
+      if (!desiredStateToCheck.isValidDesiredState()) {
+        throw new IllegalArgumentException("Invalid arguments, invalid desired"
+            + " state, desiredState=" + desiredStateToCheck);
+      }
+      checkDesiredState = true;
+    }
+
+    for (Service s : cluster.getServices().values()) {
+      if (checkDesiredState
+          && (desiredStateToCheck != s.getDesiredState())) {
+        // skip non matching state
+        continue;
+      }
+      response.add(s.convertToResponse());
+    }
+    return response;
+  }
+
+  // Update services based on the given requests.
+  protected synchronized RequestStatusResponse updateServices(
+      Set<ServiceRequest> requests, Map<String, String> requestProperties,
+      boolean runSmokeTest, boolean reconfigureClients) throws AmbariException {
+
+    AmbariManagementController controller = getManagementController();
+
+    if (requests.isEmpty()) {
+      LOG.warn("Received an empty request set");
+      return null;
+    }
+
+    Map<State, List<Service>> changedServices
+        = new HashMap<State, List<Service>>();
+    Map<State, List<ServiceComponent>> changedComps =
+        new HashMap<State, List<ServiceComponent>>();
+    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts =
+        new HashMap<String, Map<State, List<ServiceComponentHost>>>();
+    Collection<ServiceComponentHost> ignoredScHosts =
+        new ArrayList<ServiceComponentHost>();
+
+    Set<String> clusterNames = new HashSet<String>();
+    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
+    Set<State> seenNewStates = new HashSet<State>();
+
+    Clusters       clusters       = controller.getClusters();
+    AmbariMetaInfo ambariMetaInfo = controller.getAmbariMetaInfo();
+
+    for (ServiceRequest request : requests) {
+      if (request.getClusterName() == null
+          || request.getClusterName().isEmpty()
+          || request.getServiceName() == null
+          || request.getServiceName().isEmpty()) {
+        throw new IllegalArgumentException("Invalid arguments, cluster name"
+            + " and service name should be provided to update services");
+      }
+
+      LOG.info("Received a updateService request"
+          + ", clusterName=" + request.getClusterName()
+          + ", serviceName=" + request.getServiceName()
+          + ", request=" + request.toString());
+
+      clusterNames.add(request.getClusterName());
+
+      if (clusterNames.size() > 1) {
+        throw new IllegalArgumentException("Updates to multiple clusters is not"
+            + " supported");
+      }
+
+      if (!serviceNames.containsKey(request.getClusterName())) {
+        serviceNames.put(request.getClusterName(), new HashSet<String>());
+      }
+      if (serviceNames.get(request.getClusterName())
+          .contains(request.getServiceName())) {
+        // TODO throw single exception
+        throw new IllegalArgumentException("Invalid request contains duplicate"
+            + " service names");
+      }
+      serviceNames.get(request.getClusterName()).add(request.getServiceName());
+
+      Cluster cluster = clusters.getCluster(request.getClusterName());
+      Service s = cluster.getService(request.getServiceName());
+      State oldState = s.getDesiredState();
+      State newState = null;
+      if (request.getDesiredState() != null) {
+        newState = State.valueOf(request.getDesiredState());
+        if (!newState.isValidDesiredState()) {
+          throw new IllegalArgumentException("Invalid arguments, invalid"
+              + " desired state, desiredState=" + newState);
+        }
+      }
+
+      if (request.getConfigVersions() != null) {
+        State.checkUpdateConfiguration(s, newState);
+
+        for (Map.Entry<String,String> entry :
+            request.getConfigVersions().entrySet()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Attaching config to service"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", serviceName=" + s.getName()
+                + ", configType=" + entry.getKey()
+                + ", configTag=" + entry.getValue());
+          }
+          Config config = cluster.getConfig(
+              entry.getKey(), entry.getValue());
+          if (null == config) {
+            // throw error for invalid config
+            throw new AmbariException("Trying to update service with"
+                + " invalid configs"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", clusterId=" + cluster.getClusterId()
+                + ", serviceName=" + s.getName()
+                + ", invalidConfigType=" + entry.getKey()
+                + ", invalidConfigTag=" + entry.getValue());
+          }
+        }
+      }
+
+
+      if (newState == null) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Nothing to do for new updateService request"
+              + ", clusterName=" + request.getClusterName()
+              + ", serviceName=" + request.getServiceName()
+              + ", newDesiredState=null");
+        }
+        continue;
+      }
+
+      seenNewStates.add(newState);
+
+      if (newState != oldState) {
+        if (!State.isValidDesiredStateTransition(oldState, newState)) {
+          throw new AmbariException("Invalid transition for"
+              + " service"
+              + ", clusterName=" + cluster.getClusterName()
+              + ", clusterId=" + cluster.getClusterId()
+              + ", serviceName=" + s.getName()
+              + ", currentDesiredState=" + oldState
+              + ", newDesiredState=" + newState);
+
+        }
+        if (!changedServices.containsKey(newState)) {
+          changedServices.put(newState, new ArrayList<Service>());
+        }
+        changedServices.get(newState).add(s);
+      }
+
+      // TODO should we check whether all servicecomponents and
+      // servicecomponenthosts are in the required desired state?
+
+      for (ServiceComponent sc : s.getServiceComponents().values()) {
+        State oldScState = sc.getDesiredState();
+        if (newState != oldScState) {
+          if (sc.isClientComponent() &&
+              !newState.isValidClientComponentState()) {
+            continue;
+          }
+          if (!State.isValidDesiredStateTransition(oldScState, newState)) {
+            throw new AmbariException("Invalid transition for"
+                + " servicecomponent"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", clusterId=" + cluster.getClusterId()
+                + ", serviceName=" + sc.getServiceName()
+                + ", componentName=" + sc.getName()
+                + ", currentDesiredState=" + oldScState
+                + ", newDesiredState=" + newState);
+          }
+          if (!changedComps.containsKey(newState)) {
+            changedComps.put(newState, new ArrayList<ServiceComponent>());
+          }
+          changedComps.get(newState).add(sc);
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Handling update to ServiceComponent"
+              + ", clusterName=" + request.getClusterName()
+              + ", serviceName=" + s.getName()
+              + ", componentName=" + sc.getName()
+              + ", currentDesiredState=" + oldScState
+              + ", newDesiredState=" + newState);
+        }
+        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()){
+          State oldSchState = sch.getState();
+          if (oldSchState == State.MAINTENANCE || oldSchState == State.UNKNOWN) {
+            //Ignore host components updates in this state
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Ignoring ServiceComponentHost"
+                  + ", clusterName=" + request.getClusterName()
+                  + ", serviceName=" + s.getName()
+                  + ", componentName=" + sc.getName()
+                  + ", hostname=" + sch.getHostName()
+                  + ", currentState=" + oldSchState
+                  + ", newDesiredState=" + newState);
+            }
+            continue;
+          }
+          if (newState == oldSchState) {
+            ignoredScHosts.add(sch);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Ignoring ServiceComponentHost"
+                  + ", clusterName=" + request.getClusterName()
+                  + ", serviceName=" + s.getName()
+                  + ", componentName=" + sc.getName()
+                  + ", hostname=" + sch.getHostName()
+                  + ", currentState=" + oldSchState
+                  + ", newDesiredState=" + newState);
+            }
+            continue;
+          }
+          if (sc.isClientComponent() &&
+              !newState.isValidClientComponentState()) {
+            continue;
+          }
+          /**
+           * This is a hack for now wherein we don't fail if the
+           * sch is in INSTALL_FAILED
+           */
+          if (!State.isValidStateTransition(oldSchState, newState)) {
+            String error = "Invalid transition for"
+                + " servicecomponenthost"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", clusterId=" + cluster.getClusterId()
+                + ", serviceName=" + sch.getServiceName()
+                + ", componentName=" + sch.getServiceComponentName()
+                + ", hostname=" + sch.getHostName()
+                + ", currentState=" + oldSchState
+                + ", newDesiredState=" + newState;
+            StackId sid = cluster.getDesiredStackVersion();
+
+            if ( ambariMetaInfo.getComponentCategory(
+                sid.getStackName(), sid.getStackVersion(), sc.getServiceName(),
+                sch.getServiceComponentName()).isMaster()) {
+              throw new AmbariException(error);
+            } else {
+              LOG.warn("Ignoring: " + error);
+              continue;
+            }
+          }
+          if (!changedScHosts.containsKey(sc.getName())) {
+            changedScHosts.put(sc.getName(),
+                new HashMap<State, List<ServiceComponentHost>>());
+          }
+          if (!changedScHosts.get(sc.getName()).containsKey(newState)) {
+            changedScHosts.get(sc.getName()).put(newState,
+                new ArrayList<ServiceComponentHost>());
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Handling update to ServiceComponentHost"
+                + ", clusterName=" + request.getClusterName()
+                + ", serviceName=" + s.getName()
+                + ", componentName=" + sc.getName()
+                + ", hostname=" + sch.getHostName()
+                + ", currentState=" + oldSchState
+                + ", newDesiredState=" + newState);
+          }
+          changedScHosts.get(sc.getName()).get(newState).add(sch);
+        }
+      }
+    }
+
+    if (seenNewStates.size() > 1) {
+      // TODO should we handle this scenario
+      throw new IllegalArgumentException("Cannot handle different desired state"
+          + " changes for a set of services at the same time");
+    }
+
+    for (ServiceRequest request : requests) {
+      Cluster cluster = clusters.getCluster(request.getClusterName());
+      Service s = cluster.getService(request.getServiceName());
+      if (request.getConfigVersions() != null) {
+        Map<String, Config> updated = new HashMap<String, Config>();
+
+        for (Map.Entry<String,String> entry : request.getConfigVersions().entrySet()) {
+          Config config = cluster.getConfig(entry.getKey(), entry.getValue());
+          updated.put(config.getType(), config);
+        }
+
+        if (!updated.isEmpty()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Updating service configs, attaching configs"
+                + ", clusterName=" + request.getClusterName()
+                + ", serviceName=" + s.getName()
+                + ", configCount=" + updated.size());
+          }
+          s.updateDesiredConfigs(updated);
+          s.persist();
+        }
+
+        for (ServiceComponent sc : s.getServiceComponents().values()) {
+          sc.deleteDesiredConfigs(updated.keySet());
+          for (ServiceComponentHost sch :
+              sc.getServiceComponentHosts().values()) {
+            sch.deleteDesiredConfigs(updated.keySet());
+            sch.persist();
+          }
+          sc.persist();
+        }
+      }
+    }
+
+    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());
+
+    return controller.createStages(cluster, requestProperties, null, changedServices, changedComps, changedScHosts,
+        ignoredScHosts, runSmokeTest, reconfigureClients);
+  }
+
+  // Delete services based on the given set of requests
+  protected RequestStatusResponse deleteServices(Set<ServiceRequest> request)
+      throws AmbariException {
+
+    Clusters clusters    = getManagementController().getClusters();
+
+    for (ServiceRequest serviceRequest : request) {
+      if (StringUtils.isEmpty(serviceRequest.getClusterName()) || StringUtils.isEmpty(serviceRequest.getServiceName())) {
+        // FIXME throw correct error
+        throw new AmbariException("invalid arguments");
+      } else {
+        clusters.getCluster(serviceRequest.getClusterName()).deleteService(serviceRequest.getServiceName());
+      }
+    }
+    return null;
+  }
 }
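
For context, here is a minimal usage sketch (not part of the patch) showing how the now provider-local methods can be driven from a same-package caller. Only ServiceResourceProvider, ServiceRequest, and the method names shown in the diff above come from the patch; the helper class and flow below are illustrative assumptions.

package org.apache.ambari.server.controller.internal;

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.ServiceRequest;

// Illustrative sketch: createServices/deleteServices are protected on the
// package-private ServiceResourceProvider after this patch, so a same-package
// collaborator or test can call them directly instead of going through the
// AmbariManagementController facade.
class ServiceResourceProviderUsageSketch {

  void addThenRemoveHdfs(ServiceResourceProvider provider) throws AmbariException {
    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
    // (clusterName, serviceName, configVersions, desiredState); the desired state
    // must be null or INIT at creation time, per the validation in createServices.
    requests.add(new ServiceRequest("c1", "HDFS",
        Collections.<String, String>emptyMap(), null));

    provider.createServices(requests);   // was getManagementController().createServices(requests)
    provider.deleteServices(requests);   // was getManagementController().deleteServices(requests)
  }
}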

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/1aad6407/ambari-server/src/main/java/org/apache/ambari/server/state/State.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/State.java b/ambari-server/src/main/java/org/apache/ambari/server/state/State.java
index 034e7e8..e0bc115 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/State.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/State.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.state;
 
+import org.apache.ambari.server.AmbariException;
+
 public enum State {
   /**
    * Initial/Clean state.
@@ -97,25 +99,6 @@ public enum State {
   }
 
   /**
-   * Indicates whether or not its a state indicating a task in progress.
-   *
-   * @return true if this is a state indicating progress.
-   */
-  public boolean isInProgressState() {
-    switch (State.values()[this.state]) {
-      case INSTALLING:
-      case STARTING:
-      case STOPPING:
-      case UNINSTALLING:
-      case WIPING_OUT:
-      case UPGRADING:
-        return true;
-      default:
-        return false;
-    }
-  }
-
-  /**
    * Indicates whether or not it is a valid state for the client component.
    *
    * @return true if this is a valid state for a client component.
@@ -150,4 +133,158 @@ public enum State {
         return false;
     }
   }
+
+  /**
+   * Utility method to determine whether or not a valid transition can be made from the given states.
+   *
+   * @param startState    the starting state
+   * @param desiredState  the desired state
+   *
+   * @return true iff a valid transition can be made from the starting state to the desired state
+   */
+  public static boolean isValidStateTransition(State startState, State desiredState) {
+    switch(desiredState) {
+      case INSTALLED:
+        if (startState == State.INIT
+            || startState == State.UNINSTALLED
+            || startState == State.INSTALLED
+            || startState == State.INSTALLING
+            || startState == State.STARTED
+            || startState == State.INSTALL_FAILED
+            || startState == State.UPGRADING
+            || startState == State.STOPPING
+            || startState == State.UNKNOWN
+            || startState == State.MAINTENANCE) {
+          return true;
+        }
+        break;
+      case STARTED:
+        if (startState == State.INSTALLED
+            || startState == State.STARTING
+            || startState == State.STARTED) {
+          return true;
+        }
+        break;
+      case UNINSTALLED:
+        if (startState == State.INSTALLED
+            || startState == State.UNINSTALLED
+            || startState == State.UNINSTALLING) {
+          return true;
+        }
+        break;
+      case INIT:
+        if (startState == State.UNINSTALLED
+            || startState == State.INIT
+            || startState == State.WIPING_OUT) {
+          return true;
+        }
+        break;
+      case MAINTENANCE:
+        if (startState == State.INSTALLED
+            || startState == State.UNKNOWN) {
+          return true;
+        }
+    }
+    return false;
+  }
+
+  /**
+   * Utility method to determine whether or not the given desired state is valid for the given starting state.
+   *
+   * @param startState    the starting state
+   * @param desiredState  the desired state
+   *
+   * @return true iff the given desired state is valid for the given starting state
+   */
+  public static boolean isValidDesiredStateTransition(State startState, State desiredState) {
+    switch(desiredState) {
+      case INSTALLED:
+        if (startState == State.INIT
+            || startState == State.UNINSTALLED
+            || startState == State.INSTALLED
+            || startState == State.STARTED
+            || startState == State.STOPPING) {
+          return true;
+        }
+        break;
+      case STARTED:
+        if (startState == State.INSTALLED
+            || startState == State.STARTED) {
+          return true;
+        }
+        break;
+    }
+    return false;
+  }
+
+  /**
+   * Determine whether or not it is safe to update the configuration of the given service
+   * component host for the given states.
+   *
+   * @param serviceComponentHost  the service component host
+   * @param currentState          the current state
+   * @param desiredState          the desired state
+   *
+   * @throws AmbariException if the changing of configuration is not supported
+   */
+  public static void checkUpdateConfiguration(
+      ServiceComponentHost serviceComponentHost,
+      State currentState, State desiredState)
+      throws AmbariException {
+
+    if (desiredState != null) {
+      if (!(desiredState == State.INIT
+          || desiredState == State.INSTALLED
+          || desiredState == State.STARTED)) {
+        throw new AmbariException("Changing of configs not supported"
+            + " for this transition"
+            + ", clusterName=" + serviceComponentHost.getClusterName()
+            + ", serviceName=" + serviceComponentHost.getServiceName()
+            + ", componentName=" + serviceComponentHost.getServiceComponentName()
+            + ", hostname=" + serviceComponentHost.getHostName()
+            + ", currentState=" + currentState
+            + ", newDesiredState=" + desiredState);
+      }
+    }
+  }
+
+  /**
+   * Determine whether or not it is safe to update the configuration of the given service
+   * component for the given state.
+   *
+   * @param serviceComponent  the service component
+   * @param desiredState      the desired state
+   *
+   * @throws AmbariException if the changing of configuration is not supported
+   */
+  public static void checkUpdateConfiguration(
+      ServiceComponent serviceComponent,
+      State desiredState)
+      throws AmbariException {
+    for (ServiceComponentHost sch :
+        serviceComponent.getServiceComponentHosts().values()) {
+      checkUpdateConfiguration(sch,
+          sch.getState(), desiredState);
+    }
+  }
+
+  /**
+   * Determine whether or not it is safe to update the configuration of the given service
+   * for the given state.
+   *
+   * @param service       the service
+   * @param desiredState  the desired state
+   *
+   * @throws AmbariException if the changing of configuration is not supported
+   */
+  public static void checkUpdateConfiguration(Service service,
+                                              State desiredState)
+      throws AmbariException {
+    for (ServiceComponent component :
+        service.getServiceComponents().values()) {
+      checkUpdateConfiguration(component,
+          desiredState);
+    }
+  }
+
+
+
 }
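
As a quick illustration (assumed usage, not part of the patch), the new static transition helpers can be queried directly; the expected values below follow from the switch tables added above.

package org.apache.ambari.server.state;

// Hypothetical example class; only the State enum and its static helpers come
// from the patch above.
public class StateTransitionExample {
  public static void main(String[] args) {
    // Re-driving a failed install back to INSTALLED is a legal runtime transition.
    System.out.println(State.isValidStateTransition(State.INSTALL_FAILED, State.INSTALLED)); // true

    // A desired-state jump straight from INIT to STARTED is rejected; the
    // service has to reach INSTALLED first.
    System.out.println(State.isValidDesiredStateTransition(State.INIT, State.STARTED)); // false
  }
}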

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/1aad6407/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 943fc65..9dbcfcd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -18,45 +18,16 @@
 
 package org.apache.ambari.server.controller;
 
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.lang.reflect.Type;
-import java.text.MessageFormat;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.gson.Gson;
+import com.google.inject.Injector;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.HostNotFoundException;
 import org.apache.ambari.server.ParentObjectNotFoundException;
-import org.apache.ambari.server.Role;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -64,20 +35,27 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.Predicate;
 import org.easymock.Capture;
 import org.junit.Test;
 
-import com.google.gson.Gson;
-import com.google.gson.reflect.TypeToken;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * AmbariManagementControllerImpl unit tests
@@ -286,151 +264,6 @@ public class AmbariManagementControllerImplTest {
   }
 
   @Test
-  public void testGetServices() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-    ServiceResponse response = createNiceMock(ServiceResponse.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
-
-    expect(service.convertToResponse()).andReturn(response);
-    // replay mocks
-    replay(injector, clusters, cluster, service, response);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceResponse> setResponses = controller.getServices(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, service, response);
-  }
-
-  /**
-   * Ensure that ServiceNotFoundException is propagated in case where there is a single request.
-   */
-  @Test
-  public void testGetServices___ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andThrow(new ServiceNotFoundException("custer1", "service1"));
-
-    // replay mocks
-    replay(injector, clusters, cluster);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getServices(setRequests);
-      fail("expected ServiceNotFoundException");
-    } catch (ServiceNotFoundException e) {
-      // expected
-    }
-
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster);
-  }
-
-  /**
-   * Ensure that ServiceNotFoundException is handled where there are multiple requests as would be the
-   * case when an OR predicate is provided in the query.
-   */
-  @Test
-  public void testGetServices___OR_Predicate_ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service1 = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
-    ServiceResponse response = createNiceMock(ServiceResponse.class);
-    ServiceResponse response2 = createNiceMock(ServiceResponse.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request2 = new ServiceRequest("cluster1", "service2", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request3 = new ServiceRequest("cluster1", "service3", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request4 = new ServiceRequest("cluster1", "service4", Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-    setRequests.add(request4);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
-    expect(cluster.getService("service1")).andReturn(service1);
-    expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
-    expect(cluster.getService("service3")).andThrow(new ServiceNotFoundException("cluster1", "service3"));
-    expect(cluster.getService("service4")).andReturn(service2);
-
-    expect(service1.convertToResponse()).andReturn(response);
-    expect(service2.convertToResponse()).andReturn(response2);
-    // replay mocks
-    replay(injector, clusters, cluster, service1, service2, response, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceResponse> setResponses = controller.getServices(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, service1, service2, response, response2);
-  }
-
-  @Test
   public void testGetComponents() throws Exception {
     // member state mocks
     Injector injector = createStrictMock(Injector.class);
@@ -1571,671 +1404,4 @@ public class AmbariManagementControllerImplTest {
     verify(injector, clusters, cluster, response1, response2, response3, stack, metaInfo, service1, service2,
         component1, component2, componentHost1, componentHost2, componentHost3);
   }
-
-  @Test
-  public void testMaintenanceAndDeleteStates() throws Exception {
-    Map<String,String> mapRequestProps = new HashMap<String, String>();
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-        
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/main/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "target/version");
-        properties.setProperty(Configuration.OS_VERSION_KEY,
-            "centos5");
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-    
-    try {
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-      Clusters clusters = injector.getInstance(Clusters.class);
-      Gson gson = new Gson();
-  
-      clusters.addHost("host1");
-      clusters.addHost("host2");
-      clusters.addHost("host3");
-      Host host = clusters.getHost("host1");
-      host.setOsType("centos5");
-      host.persist();
-      host = clusters.getHost("host2");
-      host.setOsType("centos5");
-      host.persist();
-      host = clusters.getHost("host3");
-      host.setOsType("centos5");
-      host.persist();
-  
-      ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
-      amc.createCluster(clusterRequest);
-  
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-  
-      amc.createServices(serviceRequests);
-  
-      Type confType = new TypeToken<Map<String, String>>() {
-      }.getType();
-  
-      ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1",
-          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-  
-      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1",
-          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-  
-      configurationRequest = new ConfigurationRequest("c1", "global", "version1",
-          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-  
-  
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS",
-          gson.<Map<String, String>>fromJson("{\"core-site\": \"version1\", \"hdfs-site\": \"version1\", \"global\" : \"version1\" }", confType)
-          , null));
-  
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-  
-      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
-  
-      amc.createComponents(serviceComponentRequests);
-  
-      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
-      hostRequests.add(new HostRequest("host1", "c1", null));
-      hostRequests.add(new HostRequest("host2", "c1", null));
-      hostRequests.add(new HostRequest("host3", "c1", null));
-  
-      amc.createHosts(hostRequests);
-  
-      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
-  
-  
-      amc.createHostComponents(componentHostRequests);
-  
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-  
-      Cluster cluster = clusters.getCluster("c1");
-      Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes.size());
-  
-      ServiceComponentHost componentHost = namenodes.get("host1");
-  
-      Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
-      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-        ServiceComponentHost cHost = entry.getValue();
-        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-      }
-      hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-        ServiceComponentHost cHost = entry.getValue();
-        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-      }
-      hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
-      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-        ServiceComponentHost cHost = entry.getValue();
-        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-      }
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      assertEquals(State.MAINTENANCE, componentHost.getState());
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "INSTALLED"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      assertEquals(State.INSTALLED, componentHost.getState());
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      assertEquals(State.MAINTENANCE, componentHost.getState());
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, null));
-  
-      amc.createHostComponents(componentHostRequests);
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, "INSTALLED"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(2, namenodes.size());
-  
-      componentHost = namenodes.get("host2");
-      componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-      componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis()));
-  
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-  
-      RequestStatusResponse response = amc.updateServices(serviceRequests,
-        mapRequestProps, true, false);
-      for (ShortTaskStatus shortTaskStatus : response.getTasks()) {
-        assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole()));
-      }
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-  
-      amc.deleteHostComponents(componentHostRequests);
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes.size());
-  
-      // testing the behavior for runSmokeTest flag
-      // piggybacking on this test to avoid setting up the mock cluster
-      testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests);
-  
-      // should be able to add the host component back
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-      amc.createHostComponents(componentHostRequests);
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(2, namenodes.size());
-      
-      
-      // make unknown
-      ServiceComponentHost sch = null;
-      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
-        if (tmp.getServiceComponentName().equals("DATANODE")) {
-          tmp.setState(State.UNKNOWN);
-          sch = tmp;
-        }
-      }
-      assertNotNull(sch);
-  
-      // make maintenance
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, "MAINTENANCE"));
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, false);
-      assertEquals(State.MAINTENANCE, sch.getState ());
-      
-      // confirm delete
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-      amc.deleteHostComponents(componentHostRequests);
-      
-      sch = null;
-      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
-        if (tmp.getServiceComponentName().equals("DATANODE")) {
-          sch = tmp;
-        }
-      }
-      assertNull(sch);
-    
-      /*
-      *Test remove service
-      */
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", null, null, null));
-      assertEquals(1, amc.getServices(serviceRequests).size());
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-      amc.deleteServices(serviceRequests);
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", null, null, null));     
-      assertEquals(0, amc.getServices(serviceRequests).size());
-      
-      /*
-      *Test add service again
-      */
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-      amc.createServices(serviceRequests);
-      assertEquals(1, amc.getServices(serviceRequests).size());
-      //Create new configs
-      configurationRequest = new ConfigurationRequest("c1", "core-site", "version2",
-          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2",
-          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-      configurationRequest = new ConfigurationRequest("c1", "global", "version2",
-          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);    
-      //Add configs to service
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS",
-          gson.<Map<String, String>>fromJson("{\"core-site\": \"version2\", \"hdfs-site\": \"version2\", \"global\" : \"version2\" }", confType)
-          , null));
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-      //Create service components
-      serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
-      amc.createComponents(serviceComponentRequests);
-      
-      //Create ServiceComponentHosts
-      componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
-      amc.createHostComponents(componentHostRequests);    
-  
-      
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes.size());
-      Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
-      assertEquals(3, datanodes.size());
-      Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes2.size());
-    } finally {
-      injector.getInstance(PersistService.class).stop();
-    }    
-  }
-
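-  /**
-   * Helper for the test above: stops and restarts the HDFS service twice,
-   * first with runSmokeTest=false and then with runSmokeTest=true, and
-   * asserts that an HDFS_SERVICE_CHECK task is scheduled only in the
-   * second case.
-   */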
-  private void testRunSmokeTestFlag(Map<String, String> mapRequestProps,
-                                    AmbariManagementController amc,
-                                    Set<ServiceRequest> serviceRequests)
-      throws AmbariException {
-    RequestStatusResponse response;
-
-    //Stopping HDFS service
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps, false,
-      false);
-
-    //Starting HDFS service. No run_smoke_test flag is set, smoke
-    // test(HDFS_SERVICE_CHECK) won't run
-    boolean runSmokeTest = false;
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps,
-      runSmokeTest, false);
-
-    List<ShortTaskStatus> taskStatuses = response.getTasks();
-    boolean smokeTestRequired = false;
-    for (ShortTaskStatus shortTaskStatus : taskStatuses) {
-      if (shortTaskStatus.getRole().equals(Role.HDFS_SERVICE_CHECK.toString())) {
-        smokeTestRequired = true;
-      }
-    }
-    assertFalse(smokeTestRequired);
-
-    //Stopping HDFS service
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps, false,
-      false);
-
-    //Starting HDFS service again.
-    //run_smoke_test flag is set, smoke test will be run
-    runSmokeTest = true;
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps,
-      runSmokeTest, false);
-
-    taskStatuses = response.getTasks();
-    smokeTestRequired = false;
-    for (ShortTaskStatus shortTaskStatus : taskStatuses) {
-      if (shortTaskStatus.getRole().equals(Role.HDFS_SERVICE_CHECK.toString())) {
-        smokeTestRequired = true;
-      }
-    }
-    assertTrue(smokeTestRequired);
-  }
-
-
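-  /**
-   * Builds an in-memory cluster with HDFS, MAPREDUCE2 and YARN on a single
-   * host, installs and starts the services with the runSmokeTest flag set,
-   * and verifies that exactly one service-check task is scheduled per service.
-   */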
-  @Test
-  public void testScheduleSmokeTest() throws Exception {
-
-    final String HOST1 = "host1";
-    final String OS_TYPE = "centos5";
-    final String STACK_ID = "HDP-2.0.1";
-    final String CLUSTER_NAME = "c1";
-    final String HDFS_SERVICE_CHECK_ROLE = "HDFS_SERVICE_CHECK";
-    final String MAPREDUCE2_SERVICE_CHECK_ROLE = "MAPREDUCE2_SERVICE_CHECK";
-    final String YARN_SERVICE_CHECK_ROLE = "YARN_SERVICE_CHECK";
-
-    Map<String,String> mapRequestProps = Collections.<String,String>emptyMap();
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/test/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "../version");
-        properties.setProperty(Configuration.OS_VERSION_KEY, OS_TYPE);
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-    
-    try {
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-      Clusters clusters = injector.getInstance(Clusters.class);
-  
-      clusters.addHost(HOST1);
-      Host host = clusters.getHost(HOST1);
-      host.setOsType(OS_TYPE);
-      host.persist();
-  
-      ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
-      amc.createCluster(clusterRequest);
-  
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
-  
-      amc.createServices(serviceRequests);
-  
-      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
-  
-      amc.createComponents(serviceComponentRequests);
-  
-      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
-      hostRequests.add(new HostRequest(HOST1, CLUSTER_NAME, null));
-  
-      amc.createHosts(hostRequests);
-  
-      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
-  
-      amc.createHostComponents(componentHostRequests);
-  
-      //Install services
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.INSTALLED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.INSTALLED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.INSTALLED.name()));
-  
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-  
-      Cluster cluster = clusters.getCluster(CLUSTER_NAME);
-  
-      for (String serviceName : cluster.getServices().keySet() ) {
-  
-        for(String componentName: cluster.getService(serviceName).getServiceComponents().keySet()) {
-  
-          Map<String, ServiceComponentHost> serviceComponentHosts = cluster.getService(serviceName).getServiceComponent(componentName).getServiceComponentHosts();
-  
-          for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHosts.entrySet()) {
-            ServiceComponentHost cHost = entry.getValue();
-            cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), STACK_ID));
-            cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-          }
-        }
-      }
-  
-      //Start services
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.STARTED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.STARTED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.STARTED.name()));
-  
-      RequestStatusResponse response = amc.updateServices(serviceRequests,
-        mapRequestProps, true, false);
-  
-      Collection<?> hdfsSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(HDFS_SERVICE_CHECK_ROLE));
-      //Ensure that smoke test task was created for HDFS
-      assertEquals(1, hdfsSmokeTasks.size());
-  
-      Collection<?> mapreduce2SmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(MAPREDUCE2_SERVICE_CHECK_ROLE));
-      //Ensure that smoke test task was created for MAPREDUCE2
-      assertEquals(1, mapreduce2SmokeTasks.size());
-  
-      Collection<?> yarnSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(YARN_SERVICE_CHECK_ROLE));
-      //Ensure that smoke test task was created for YARN
-      assertEquals(1, yarnSmokeTasks.size());
-    } finally {
-      injector.getInstance(PersistService.class).stop();
-    }
-  }
-
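-  /**
-   * Commons Collections Predicate that matches ShortTaskStatus entries by
-   * role name; used above to count the scheduled service-check tasks.
-   */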
-  private class RolePredicate implements Predicate {
-
-    private String role;
-
-    public RolePredicate(String role) {
-      this.role = role;
-    }
-
-    @Override
-    public boolean evaluate(Object obj) {
-      ShortTaskStatus task = (ShortTaskStatus)obj;
-      return task.getRole().equals(role);
-    }
-  }
-
-  
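-  /**
-   * Creates a cluster with services, components and host components on one
-   * registered host, deletes the cluster, and verifies that both registered
-   * hosts survive the delete, both in memory (Clusters) and in the database
-   * (HostDAO).
-   */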
-  @Test
-  public void testDeleteClusterCreateHost() throws Exception {
-    
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/test/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "../version");
-        properties.setProperty(Configuration.OS_VERSION_KEY, "centos6");
-        
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-    
-    
-    String STACK_ID = "HDP-2.0.1";
-    String CLUSTER_NAME = "c1";
-    String HOST1 = "h1";
-    String HOST2 = "h2";
-    
-    try {
-      Clusters clusters = injector.getInstance(Clusters.class);
-      
-      clusters.addHost(HOST1);
-      Host host = clusters.getHost(HOST1);
-      host.setOsType("centos6");
-      host.persist();      
-      
-      clusters.addHost(HOST2);
-      host = clusters.getHost(HOST2);
-      host.setOsType("centos6");
-      host.persist();      
-
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-    
-      ClusterRequest cr = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
-      amc.createCluster(cr);
-      
-      ConfigurationRequest configRequest = new ConfigurationRequest(CLUSTER_NAME, "global", "version1",
-          new HashMap<String, String>() {{ put("a", "b"); }});
-      cr.setDesiredConfig(configRequest);
-      amc.updateClusters(Collections.singleton(cr), new HashMap<String, String>());
-      
-      // add some hosts
-      Set<HostRequest> hrs = new HashSet<HostRequest>();
-      hrs.add(new HostRequest(HOST1, CLUSTER_NAME, null));
-      amc.createHosts(hrs);
-      
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
-  
-      amc.createServices(serviceRequests);
-  
-      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", null, null));
-  
-      amc.createComponents(serviceComponentRequests);
-  
-      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HDFS_CLIENT", HOST1, null, null));
-  
-      amc.createHostComponents(componentHostRequests);
-      
-      ActionRequest ar = new ActionRequest(CLUSTER_NAME, "HDFS", Role.HDFS_SERVICE_CHECK.name(), new HashMap<String, String>());
-      amc.createActions(Collections.singleton(ar), null);
-  
-      // change of mind: delete the cluster
-      amc.deleteCluster(cr);
-      
-      assertNotNull(clusters.getHost(HOST1));
-      assertNotNull(clusters.getHost(HOST2));
-      
-      HostDAO dao = injector.getInstance(HostDAO.class);
-      
-      assertNotNull(dao.findByName(HOST1));
-      assertNotNull(dao.findByName(HOST2));
-      
-    } finally {
-      injector.getInstance(PersistService.class).stop();
-    }     
-    
-  }
-
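-  /**
-   * Verifies that creating a configuration twice with the same type and tag
-   * fails with an AmbariException carrying the expected message.
-   */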
-  @Test
-  public void testApplyConfigurationWithTheSameTag() {
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/main/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-            "target/version");
-        properties.setProperty(Configuration.OS_VERSION_KEY,
-            "centos6");
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    String tag = "version1";
-    String type = "core-site";
-    AmbariException exception = null;
-    try {
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-      Clusters clusters = injector.getInstance(Clusters.class);
-      Gson gson = new Gson();
-
-      clusters.addHost("host1");
-      clusters.addHost("host2");
-      clusters.addHost("host3");
-      Host host = clusters.getHost("host1");
-      host.setOsType("centos6");
-      host.persist();
-      host = clusters.getHost("host2");
-      host.setOsType("centos6");
-      host.persist();
-      host = clusters.getHost("host3");
-      host.setOsType("centos6");
-      host.persist();
-
-      ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
-      amc.createCluster(clusterRequest);
-
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-
-      amc.createServices(serviceRequests);
-
-      Type confType = new TypeToken<Map<String, String>>() {
-      }.getType();
-
-      ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", type, tag,
-          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType));
-      amc.createConfiguration(configurationRequest);
-
-      amc.createConfiguration(configurationRequest);
-    } catch (AmbariException e) {
-      exception = e;
-    }
-
-    assertNotNull(exception);
-    String exceptionMessage = MessageFormat.format("Configuration with tag ''{0}'' exists for ''{1}''",
-        tag, type);
-    assertEquals(exceptionMessage, exception.getMessage());
-  }
 }

