ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rle...@apache.org
Subject [2/2] ambari git commit: AMBARI-9360. Implement unkerberize for kerberized cluster (rlevas)
Date Tue, 03 Feb 2015 16:17:48 GMT
AMBARI-9360. Implement unkerberize for kerberized cluster (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/02ccb17f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/02ccb17f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/02ccb17f

Branch: refs/heads/trunk
Commit: 02ccb17fab9357f0b130b5b127aebfc3b6fb2dd7
Parents: 18136fb
Author: Robert Levas <rlevas@hortonworks.com>
Authored: Tue Feb 3 11:17:25 2015 -0500
Committer: Robert Levas <rlevas@hortonworks.com>
Committed: Tue Feb 3 11:17:38 2015 -0500

----------------------------------------------------------------------
 .../server/controller/KerberosHelper.java       |  126 +-
 .../ambari/server/state/ConfigHelper.java       |  188 +--
 .../server/controller/KerberosHelperTest.java   |  269 ++++-
 .../ambari/server/state/ConfigHelperTest.java   | 1108 ++++++++++--------
 4 files changed, 1134 insertions(+), 557 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/02ccb17f/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index fd1fb57..e8f475f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -46,7 +46,24 @@ import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
 import org.apache.ambari.server.serveraction.ServerAction;
-import org.apache.ambari.server.serveraction.kerberos.*;
+import org.apache.ambari.server.serveraction.kerberos.CreateKeytabFilesServerAction;
+import org.apache.ambari.server.serveraction.kerberos.CreatePrincipalsServerAction;
+import org.apache.ambari.server.serveraction.kerberos.FinalizeKerberosServerAction;
+import org.apache.ambari.server.serveraction.kerberos.KDCType;
+import org.apache.ambari.server.serveraction.kerberos.KerberosActionDataFile;
+import org.apache.ambari.server.serveraction.kerberos.KerberosActionDataFileBuilder;
+import org.apache.ambari.server.serveraction.kerberos.KerberosAdminAuthenticationException;
+import org.apache.ambari.server.serveraction.kerberos.KerberosConfigDataFile;
+import org.apache.ambari.server.serveraction.kerberos.KerberosConfigDataFileBuilder;
+import org.apache.ambari.server.serveraction.kerberos.KerberosCredential;
+import org.apache.ambari.server.serveraction.kerberos.KerberosKDCConnectionException;
+import org.apache.ambari.server.serveraction.kerberos.KerberosLDAPContainerException;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationException;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandler;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandlerFactory;
+import org.apache.ambari.server.serveraction.kerberos.KerberosRealmException;
+import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
+import org.apache.ambari.server.serveraction.kerberos.UpdateKerberosConfigsServerAction;
 import org.apache.ambari.server.stageplanner.RoleGraph;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -55,10 +72,12 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
@@ -656,7 +675,9 @@ public class KerberosHelper {
       // If all goes well, set all services to _desire_ to be secured or unsecured, depending
on handler
       if (desiredSecurityState != null) {
         for (Service service : services.values()) {
-          service.setSecurityState(desiredSecurityState);
+          if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey(service.getName()))
{
+            service.setSecurityState(desiredSecurityState);
+          }
         }
       }
     }
@@ -1557,7 +1578,7 @@ public class KerberosHelper {
     public boolean shouldProcess(SecurityState desiredSecurityState, ServiceComponentHost
sch) throws AmbariException {
       return (desiredSecurityState == SecurityState.UNSECURED) &&
           (maintenanceStateHelper.getEffectiveState(sch) == MaintenanceState.OFF) &&
-          (sch.getSecurityState() != SecurityState.UNSECURED) &&
+          ((sch.getDesiredSecurityState() != SecurityState.UNSECURED) || (sch.getSecurityState()
!= SecurityState.UNSECURED)) &&
           (sch.getSecurityState() != SecurityState.UNSECURING);
     }
 
@@ -1583,13 +1604,98 @@ public class KerberosHelper {
                              ServiceComponentHostServerActionEvent event,
                              RoleCommandOrder roleCommandOrder, KerberosDetails kerberosDetails,
                              File dataDirectory, RequestStageContainer requestStageContainer,
-                             List<ServiceComponentHost> serviceComponentHosts) {
-      // TODO (rlevas): If there are principals, keytabs, and configurations to process,
setup the following sages:
-      //  1) remove principals
-      //  2) remove keytab files
-      //  3) update configurations
-      //  3) restart services
-      return requestStageContainer == null ? -1 : requestStageContainer.getLastStageId();
+                             List<ServiceComponentHost> serviceComponentHosts) throws
AmbariException {
+      //  1) revert configurations
+
+      // If a RequestStageContainer does not already exist, create a new one...
+      if (requestStageContainer == null) {
+        requestStageContainer = new RequestStageContainer(
+            actionManager.getNextRequestId(),
+            null,
+            requestFactory,
+            actionManager);
+      }
+
+      Map<String, String> commandParameters = new HashMap<String, String>();
+      commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
+      commandParameters.put(KerberosServerAction.DEFAULT_REALM, kerberosDetails.getDefaultRealm());
+      commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
+      commandParameters.put(KerberosServerAction.ADMINISTRATOR_CREDENTIAL, getEncryptedAdministratorCredentials(cluster));
+
+      // If there are configurations to set, create a (temporary) data file to store the
configuration
+      // updates and fill it with the relevant configurations.
+      if (!kerberosConfigurations.isEmpty()) {
+        File configFile = new File(dataDirectory, KerberosConfigDataFile.DATA_FILE_NAME);
+        KerberosConfigDataFileBuilder kerberosConfDataFileBuilder = null;
+
+        if (serviceComponentHosts != null) {
+          Set<String> visitedServices = new HashSet<String>();
+
+          for (ServiceComponentHost sch : serviceComponentHosts) {
+            String serviceName = sch.getServiceName();
+
+            if (!visitedServices.contains(serviceName)) {
+              StackId stackVersion = sch.getStackVersion();
+
+              visitedServices.add(serviceName);
+
+              if (stackVersion != null) {
+                Set<PropertyInfo> serviceProperties = configHelper.getServiceProperties(stackVersion,
serviceName, true);
+
+                if (serviceProperties != null) {
+                  for (PropertyInfo propertyInfo : serviceProperties) {
+                    String filename = propertyInfo.getFilename();
+
+                    if (filename != null) {
+                      Map<String, String> kerberosConfiguration = kerberosConfigurations.get(ConfigHelper.fileNameToConfigType(filename));
+
+                      if ((kerberosConfiguration != null) && (kerberosConfiguration.containsKey(propertyInfo.getName())))
{
+                        kerberosConfiguration.put(propertyInfo.getName(), propertyInfo.getValue());
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+
+        try {
+          kerberosConfDataFileBuilder = new KerberosConfigDataFileBuilder(configFile);
+
+          for (Map.Entry<String, Map<String, String>> entry : kerberosConfigurations.entrySet())
{
+            String type = entry.getKey();
+            Map<String, String> properties = entry.getValue();
+
+            if (properties != null) {
+              for (Map.Entry<String, String> configTypeEntry : properties.entrySet())
{
+                kerberosConfDataFileBuilder.addRecord(type,
+                    configTypeEntry.getKey(),
+                    configTypeEntry.getValue());
+              }
+            }
+          }
+        } catch (IOException e) {
+          String message = String.format("Failed to write kerberos configurations file -
%s", configFile.getAbsolutePath());
+          LOG.error(message);
+          throw new AmbariException(message, e);
+        } finally {
+          if (kerberosConfDataFileBuilder != null) {
+            try {
+              kerberosConfDataFileBuilder.close();
+            } catch (IOException e) {
+              LOG.warn("Failed to close the kerberos configurations file writer", e);
+            }
+          }
+        }
+      }
+
+      // *****************************************************************
+      // Create stage to update configurations of services
+      addUpdateConfigurationsStage(cluster, clusterHostInfoJson, hostParamsJson, event, commandParameters,
+          roleCommandOrder, requestStageContainer);
+
+      return requestStageContainer.getLastStageId();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/02ccb17f/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 122e0a3..a14531e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -66,7 +66,7 @@ public class ConfigHelper {
   private final Cache<ServiceComponentHost, Boolean> staleConfigsCache;
 
   private static final Logger LOG =
-    LoggerFactory.getLogger(ConfigHelper.class);
+      LoggerFactory.getLogger(ConfigHelper.class);
 
   @Inject
   public ConfigHelper(Clusters c, AmbariMetaInfo metaInfo, Configuration configuration, ClusterDAO
clusterDAO) {
@@ -75,12 +75,13 @@ public class ConfigHelper {
     this.clusterDAO = clusterDAO;
     STALE_CONFIGS_CACHE_ENABLED = configuration.isStaleConfigCacheEnabled();
     staleConfigsCache = CacheBuilder.newBuilder().
-      expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
+        expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
   }
 
   /**
    * Gets the desired tags for a cluster and host
-   * @param cluster the cluster
+   *
+   * @param cluster  the cluster
    * @param hostName the host name
    * @return a map of tag type to tag names with overrides
    * @throws AmbariException
@@ -95,7 +96,8 @@ public class ConfigHelper {
 
   /**
    * Gets the desired tags for a cluster and overrides for a host
-   * @param cluster the cluster
+   *
+   * @param cluster             the cluster
    * @param hostConfigOverrides the host overrides applied using config groups
    * @return a map of tag type to tag names with overrides
    */
@@ -104,7 +106,7 @@ public class ConfigHelper {
 
     Map<String, DesiredConfig> clusterDesired = (cluster == null) ? new HashMap<String,
DesiredConfig>() : cluster.getDesiredConfigs();
 
-    Map<String, Map<String,String>> resolved = new TreeMap<String, Map<String,
String>>();
+    Map<String, Map<String, String>> resolved = new TreeMap<String, Map<String,
String>>();
 
     // Do not use host component config mappings.  Instead, the rules are:
     // 1) Use the cluster desired config
@@ -152,7 +154,7 @@ public class ConfigHelper {
    * @return {type : {key, value}}
    */
   public Map<String, Map<String, String>> getEffectiveConfigProperties(
-    Cluster cluster, Map<String, Map<String, String>> desiredTags) {
+      Cluster cluster, Map<String, Map<String, String>> desiredTags) {
 
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String,
String>>();
 
@@ -178,7 +180,7 @@ public class ConfigHelper {
           // Now merge overrides
           for (Entry<String, String> overrideEntry : tags.entrySet()) {
             Config overrideConfig = cluster.getConfig(type,
-              overrideEntry.getValue());
+                overrideEntry.getValue());
 
             if (overrideConfig != null) {
               propertyMap = getMergedConfig(propertyMap, overrideConfig.getProperties());
@@ -313,7 +315,7 @@ public class ConfigHelper {
   }
 
   public void applyCustomConfig(Map<String, Map<String, String>> configurations,
-      String type, String name, String value, Boolean deleted) {
+                                String type, String name, String value, Boolean deleted)
{
     if (!configurations.containsKey(type)) {
       configurations.put(type, new HashMap<String, String>());
     }
@@ -330,26 +332,27 @@ public class ConfigHelper {
    * known actual configs are different than what is set on the cluster (the desired).
    * The following logic is applied:
    * <ul>
-   *   <li>Desired type does not exist on the SCH (actual)
-   *     <ul>
-   *       <li>Type does not exist on the stack: <code>false</code></li>
-   *       <li>Type exists on the stack: <code>true</code> if the config
key is on the stack.
-   *         otherwise <code>false</code></li>
-   *     </ul>
-   *   </li>
-   *   <li> Desired type exists for the SCH
-   *     <ul>
-   *       <li>Desired tags already set for the SCH (actual): <code>false</code></li>
-   *       <li>Desired tags DO NOT match SCH: <code>true</code> if the
changed keys
-   *         exist on the stack, otherwise <code>false</code></li>
-   *     </ul>
-   *   </li>
+   * <li>Desired type does not exist on the SCH (actual)
+   * <ul>
+   * <li>Type does not exist on the stack: <code>false</code></li>
+   * <li>Type exists on the stack: <code>true</code> if the config key
is on the stack.
+   * otherwise <code>false</code></li>
+   * </ul>
+   * </li>
+   * <li> Desired type exists for the SCH
+   * <ul>
+   * <li>Desired tags already set for the SCH (actual): <code>false</code></li>
+   * <li>Desired tags DO NOT match SCH: <code>true</code> if the changed
keys
+   * exist on the stack, otherwise <code>false</code></li>
    * </ul>
+   * </li>
+   * </ul>
+   *
    * @param @ServiceComponentHost
    * @return <code>true</code> if the actual configs are stale
    */
   public boolean isStaleConfigs(ServiceComponentHost sch) throws AmbariException {
-    Boolean stale  = null;
+    Boolean stale = null;
 
     if (STALE_CONFIGS_CACHE_ENABLED) {
       stale = staleConfigsCache.getIfPresent(sch);
@@ -364,6 +367,7 @@ public class ConfigHelper {
 
   /**
    * Invalidates cached isStale values for hostname
+   *
    * @param hostname
    */
   public void invalidateStaleConfigsCache(String hostname) {
@@ -387,6 +391,7 @@ public class ConfigHelper {
 
   /**
    * Invalidates cached isStale value for sch
+   *
    * @param sch
    */
   public void invalidateStaleConfigsCache(ServiceComponentHost sch) {
@@ -395,15 +400,16 @@ public class ConfigHelper {
 
   /**
    * Remove configs by type
+   *
    * @param type config Type
    */
   @Transactional
   public void removeConfigsByType(Cluster cluster, String type) {
     Set<String> globalVersions = cluster.getConfigsByType(type).keySet();
 
-    for(String version:globalVersions) {
+    for (String version : globalVersions) {
       ClusterConfigEntity clusterConfigEntity = clusterDAO.findConfig
-        (cluster.getClusterId(), type, version);
+          (cluster.getClusterId(), type, version);
 
       clusterDAO.removeConfig(clusterConfigEntity);
     }
@@ -411,6 +417,7 @@ public class ConfigHelper {
 
   /**
    * Gets all the config dictionary where property with the given name is present in stack
definitions
+   *
    * @param stackId
    * @param propertyName
    */
@@ -420,13 +427,13 @@ public class ConfigHelper {
 
     Set<String> result = new HashSet<String>();
 
-    for(Service service : clusters.getCluster(clusterName).getServices().values()) {
+    for (Service service : clusters.getCluster(clusterName).getServices().values()) {
       Set<PropertyInfo> stackProperties = ambariMetaInfo.getServiceProperties(stack.getName(),
stack.getVersion(), service.getName());
       Set<PropertyInfo> stackLevelProperties = ambariMetaInfo.getStackProperties(stack.getName(),
stack.getVersion());
       stackProperties.addAll(stackLevelProperties);
 
       for (PropertyInfo stackProperty : stackProperties) {
-        if(stackProperty.getName().equals(propertyName)) {
+        if (stackProperty.getName().equals(propertyName)) {
           String configType = fileNameToConfigType(stackProperty.getFilename());
 
           result.add(configType);
@@ -443,14 +450,15 @@ public class ConfigHelper {
 
     Set<String> result = new HashSet<String>();
 
-    for(Service service : cluster.getServices().values()) {
+    for (Service service : cluster.getServices().values()) {
       Set<PropertyInfo> serviceProperties = ambariMetaInfo.getServiceProperties(stack.getName(),
stack.getVersion(), service.getName());
       for (PropertyInfo serviceProperty : serviceProperties) {
-        if(serviceProperty.getPropertyTypes().contains(propertyType)) {
+        if (serviceProperty.getPropertyTypes().contains(propertyType)) {
           String stackPropertyConfigType = fileNameToConfigType(serviceProperty.getFilename());
           try {
             result.add(cluster.getDesiredConfigByType(stackPropertyConfigType).getProperties().get(serviceProperty.getName()));
-          } catch(Exception ex) {}
+          } catch (Exception ex) {
+          }
         }
       }
     }
@@ -458,7 +466,7 @@ public class ConfigHelper {
     Set<PropertyInfo> stackProperties = ambariMetaInfo.getStackProperties(stack.getName(),
stack.getVersion());
 
     for (PropertyInfo stackProperty : stackProperties) {
-      if(stackProperty.getPropertyTypes().contains(propertyType)) {
+      if (stackProperty.getPropertyTypes().contains(propertyType)) {
         String stackPropertyConfigType = fileNameToConfigType(stackProperty.getFilename());
         result.add(cluster.getDesiredConfigByType(stackPropertyConfigType).getProperties().get(stackProperty.getName()));
       }
@@ -472,7 +480,7 @@ public class ConfigHelper {
     StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(),
         stackId.getStackVersion());
 
-    for(ServiceInfo serviceInfo:stack.getServices()) {
+    for (ServiceInfo serviceInfo : stack.getServices()) {
       Set<PropertyInfo> serviceProperties = ambariMetaInfo.getServiceProperties(stack.getName(),
stack.getVersion(), serviceInfo.getName());
       Set<PropertyInfo> stackProperties = ambariMetaInfo.getStackProperties(stack.getName(),
stack.getVersion());
       serviceProperties.addAll(stackProperties);
@@ -480,7 +488,7 @@ public class ConfigHelper {
       for (PropertyInfo stackProperty : serviceProperties) {
         String stackPropertyConfigType = fileNameToConfigType(stackProperty.getFilename());
 
-        if(stackProperty.getName().equals(propertyName) && stackPropertyConfigType.equals(configType))
{
+        if (stackProperty.getName().equals(propertyName) && stackPropertyConfigType.equals(configType))
{
           return stackProperty.getValue();
         }
       }
@@ -496,19 +504,16 @@ public class ConfigHelper {
    * as {{hdfs-site/foo}} and return the value of {@code foo} defined in
    * {@code hdfs-site}.
    *
-   * @param cluster
-   *          the cluster to use when rendering the placeholder value (not
-   *          {@code null}).
-   * @param placeholder
-   *          the placeholder value, such as {{hdfs-site/foobar}} (not
-   *          {@code null} )
+   * @param cluster     the cluster to use when rendering the placeholder value (not
+   *                    {@code null}).
+   * @param placeholder the placeholder value, such as {{hdfs-site/foobar}} (not
+   *                    {@code null} )
    * @return the configuration value, or {@code null} if none.
-   * @throws AmbariException
-   *           if there was a problem parsing the placeholder or retrieving the
-   *           referenced value.
+   * @throws AmbariException if there was a problem parsing the placeholder or retrieving
the
+   *                         referenced value.
    */
   public String getPlaceholderValueFromDesiredConfigurations(Cluster cluster,
-      String placeholder) {
+                                                             String placeholder) {
     // remove the {{ and }} from the placholder
     if (placeholder.startsWith("{{") && placeholder.endsWith("}}")) {
       placeholder = placeholder.substring(2, placeholder.length() - 2).trim();
@@ -531,7 +536,7 @@ public class ConfigHelper {
     Map<String, String> configurationProperties = config.getProperties();
     if (null != configurationProperties) {
       String value = configurationProperties.get(propertyName);
-      if( null != value ) {
+      if (null != value) {
         return value;
       }
     }
@@ -543,13 +548,13 @@ public class ConfigHelper {
     StackId stackId = cluster.getCurrentStackVersion();
     StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
 
-    for(ServiceInfo serviceInfo:stack.getServices()) {
+    for (ServiceInfo serviceInfo : stack.getServices()) {
       Set<PropertyInfo> serviceProperties = ambariMetaInfo.getServiceProperties(stack.getName(),
stack.getVersion(), serviceInfo.getName());
 
       for (PropertyInfo stackProperty : serviceProperties) {
         String stackPropertyConfigType = fileNameToConfigType(stackProperty.getFilename());
 
-        if(stackProperty.getName().equals(propertyName) && stackPropertyConfigType.equals(configType))
{
+        if (stackProperty.getName().equals(propertyName) && stackPropertyConfigType.equals(configType))
{
           return serviceInfo;
         }
       }
@@ -560,10 +565,55 @@ public class ConfigHelper {
   }
 
   public Set<PropertyInfo> getServiceProperties(Cluster cluster, String serviceName)
throws AmbariException {
-    StackId stackId = cluster.getCurrentStackVersion();
-    StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+    // The original implementation of this method is to return all properties regardless
of whether
+    // they should be excluded or not.  By setting removeExcluded to false in the method
invocation
+    // below, no attempt will be made to remove properties that exist in excluded types.
+    return getServiceProperties(cluster.getCurrentStackVersion(), serviceName, false);
+  }
+
+  /**
+   * Retrieves a Set of PropertyInfo objects containing the relevant properties for the requested
+   * service.
+   * <p/>
+   * If <code>removeExcluded</code> is <code>true</code>, the service's
excluded configuration types
+   * are used to prune off PropertyInfos that should be ignored; else if <code>false</code>,
all
+   * PropertyInfos will be returned.
+   *
+   * @param stackId        a StackId declaring the relevant stack
+   * @param serviceName    a String containing the requested service's name
+   * @param removeExcluded a boolean value indicating whether to remove properties from excluded
+   *                       configuration types (<code>true</code>) or return
the complete set of properties regardless of exclusions (<code>false</code>)
+   * @return a Set of PropertyInfo objects for the requested service
+   * @throws AmbariException if the requested stack or the requested service is not found
+   */
+  public Set<PropertyInfo> getServiceProperties(StackId stackId, String serviceName,
boolean removeExcluded)
+      throws AmbariException {
+    ServiceInfo service = ambariMetaInfo.getService(stackId.getStackName(), stackId.getStackVersion(),
serviceName);
+    Set<PropertyInfo> properties = new HashSet<PropertyInfo>(service.getProperties());
+
+    if (removeExcluded) {
+      Set<String> excludedConfigTypes = service.getExcludedConfigTypes();
+
+      // excludedConfigTypes can be null since org.apache.ambari.server.state.ServiceInfo.setExcludedConfigTypes()
+      // allows for null values
+      if ((excludedConfigTypes != null) && !excludedConfigTypes.isEmpty()) {
+        // Iterate through the set of found PropertyInfo instances and remove ones that should
be
+        // excluded.
+        Iterator<PropertyInfo> iterator = properties.iterator();
+
+        while (iterator.hasNext()) {
+          PropertyInfo propertyInfo = iterator.next();
+
+          // If the config type for the current PropertyInfo is contained within an excluded
type,
+          // remove it from the set of properties being returned
+          if (excludedConfigTypes.contains(ConfigHelper.fileNameToConfigType(propertyInfo.getFilename())))
{
+            iterator.remove();
+          }
+        }
+      }
+    }
 
-    return ambariMetaInfo.getServiceProperties(stack.getName(), stack.getVersion(), serviceName);
+    return properties;
   }
 
   public Set<PropertyInfo> getStackProperties(Cluster cluster) throws AmbariException
{
@@ -592,9 +642,9 @@ public class ConfigHelper {
    * @throws AmbariException
    */
   public void createConfigType(Cluster cluster,
-      AmbariManagementController controller, String configType,
-      Map<String, String> properties, String authenticatedUserName,
-      String serviceVersionNote) throws AmbariException {
+                               AmbariManagementController controller, String configType,
+                               Map<String, String> properties, String authenticatedUserName,
+                               String serviceVersionNote) throws AmbariException {
 
     String tag = "version1";
     if (cluster.getConfigsByType(configType) != null) {
@@ -624,31 +674,31 @@ public class ConfigHelper {
    * Since global configs are deprecated since 1.7.0, but still supported.
    * We should automatically map any globals used, to *-env dictionaries.
    *
-   * @param configurations  map of configurations keyed by type
+   * @param configurations map of configurations keyed by type
    */
   public void moveDeprecatedGlobals(StackId stackId, Map<String, Map<String, String>>
configurations, String clusterName) {
     Map<String, String> globalConfigurations = new HashMap<String, String>();
 
-    if(configurations.get(Configuration.GLOBAL_CONFIG_TAG) == null ||
+    if (configurations.get(Configuration.GLOBAL_CONFIG_TAG) == null ||
         configurations.get(Configuration.GLOBAL_CONFIG_TAG).size() == 0) {
       return;
     }
 
     globalConfigurations.putAll(configurations.get(Configuration.GLOBAL_CONFIG_TAG));
 
-    if(globalConfigurations!=null && globalConfigurations.size() != 0) {
+    if (globalConfigurations != null && globalConfigurations.size() != 0) {
       LOG.warn("Global configurations are deprecated, "
           + "please use *-env");
     }
 
-    for(Map.Entry<String, String> property:globalConfigurations.entrySet()) {
+    for (Map.Entry<String, String> property : globalConfigurations.entrySet()) {
       String propertyName = property.getKey();
       String propertyValue = property.getValue();
 
       Set<String> newConfigTypes = null;
-      try{
+      try {
         newConfigTypes = findConfigTypesByPropertyName(stackId, propertyName, clusterName);
-      } catch(AmbariException e) {
+      } catch (AmbariException e) {
         LOG.error("Exception while getting configurations from the stacks", e);
         return;
       }
@@ -656,31 +706,31 @@ public class ConfigHelper {
       newConfigTypes.remove(Configuration.GLOBAL_CONFIG_TAG);
 
       String newConfigType = null;
-      if(newConfigTypes.size() > 0) {
+      if (newConfigTypes.size() > 0) {
         newConfigType = newConfigTypes.iterator().next();
       } else {
         newConfigType = UpgradeCatalog170.getAdditionalMappingGlobalToEnv().get(propertyName);
       }
 
-      if(newConfigType==null) {
+      if (newConfigType == null) {
         LOG.warn("Cannot find where to map " + propertyName + " from " + Configuration.GLOBAL_CONFIG_TAG
+
-            " (value="+propertyValue+")");
+            " (value=" + propertyValue + ")");
         continue;
       }
 
       LOG.info("Mapping config " + propertyName + " from " + Configuration.GLOBAL_CONFIG_TAG
+
           " to " + newConfigType +
-          " (value="+propertyValue+")");
+          " (value=" + propertyValue + ")");
 
       configurations.get(Configuration.GLOBAL_CONFIG_TAG).remove(propertyName);
 
-      if(!configurations.containsKey(newConfigType)) {
+      if (!configurations.containsKey(newConfigType)) {
         configurations.put(newConfigType, new HashMap<String, String>());
       }
       configurations.get(newConfigType).put(propertyName, propertyValue);
     }
 
-    if(configurations.get(Configuration.GLOBAL_CONFIG_TAG).size() == 0) {
+    if (configurations.get(Configuration.GLOBAL_CONFIG_TAG).size() == 0) {
       configurations.remove(Configuration.GLOBAL_CONFIG_TAG);
     }
   }
@@ -691,7 +741,7 @@ public class ConfigHelper {
       return true;
     }
 
-    Map <String, HostConfig> actual = sch.getActualConfigs();
+    Map<String, HostConfig> actual = sch.getActualConfigs();
     if (null == actual || actual.isEmpty()) {
       return false;
     }
@@ -750,7 +800,7 @@ public class ConfigHelper {
           // and if it applies
           // to the service
           Collection<String> changed = findChangedKeys(cluster, type,
-            tags.values(), actualTags.values());
+              tags.values(), actualTags.values());
           if (serviceInfo.hasDependencyAndPropertyFor(type, changed)) {
             stale = true;
           }
@@ -792,7 +842,7 @@ public class ConfigHelper {
    * for the type.
    */
   private boolean hasPropertyFor(StackId stack, String type,
-      Collection<String> keys) throws AmbariException {
+                                 Collection<String> keys) throws AmbariException {
 
     for (ServiceInfo svc : ambariMetaInfo.getServices(stack.getStackName(),
         stack.getStackVersion()).values()) {
@@ -810,7 +860,7 @@ public class ConfigHelper {
    * @return the keys that have changed values
    */
   private Collection<String> findChangedKeys(Cluster cluster, String type,
-      Collection<String> desiredTags, Collection<String> actualTags) {
+                                             Collection<String> desiredTags, Collection<String>
actualTags) {
 
     Map<String, String> desiredValues = new HashMap<String, String>();
     Map<String, String> actualValues = new HashMap<String, String>();
@@ -881,7 +931,7 @@ public class ConfigHelper {
   }
 
   /**
-   * @return  the list of combined config property names
+   * @return the list of combined config property names
    */
   private Collection<String> mergeKeyNames(Cluster cluster, String type, Collection<String>
tags) {
     Set<String> names = new HashSet<String>();

http://git-wip-us.apache.org/repos/asf/ambari/blob/02ccb17f/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 3532e69..9cc7bbb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -109,7 +109,7 @@ import static org.powermock.api.easymock.PowerMock.verifyAll;
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(KerberosDescriptor.class)
-@PowerMockIgnore({"javax.crypto.*"})
+@PowerMockIgnore({"javax.crypto.*", "org.apache.log4j.*"})
 @SuppressWarnings("unchecked")
 public class KerberosHelperTest {
 
@@ -338,21 +338,44 @@ public class KerberosHelperTest {
     testRegenerateKeytabs(new KerberosCredential("principal", "password", "keytab"), false,
false);
   }
 
+  @Test
+  public void testDisableKerberos() throws Exception {
+    testDisableKerberos(new KerberosCredential("principal", "password", "keytab"), false,
true);
+  }
+
   private void testEnableKerberos(final KerberosCredential kerberosCredential,
                                   boolean getClusterDescriptor,
                                   boolean getStackDescriptor) throws Exception {
 
     KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
 
-    final ServiceComponentHost sch1 = createNiceMock(ServiceComponentHost.class);
+    final StackId stackVersion = createNiceMock(StackId.class);
+
+    final ServiceComponentHost sch1 = createMock(ServiceComponentHost.class);
     expect(sch1.getServiceName()).andReturn("SERVICE1").once();
     expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").once();
     expect(sch1.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
+    expect(sch1.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
+    expect(sch1.getStackVersion()).andReturn(stackVersion).anyTimes();
+    expect(sch1.getHostName()).andReturn("host1").anyTimes();
+
+    sch1.setDesiredSecurityState(SecurityState.SECURED_KERBEROS);
+    expect(expectLastCall()).once();
+    sch1.setSecurityState(SecurityState.SECURING);
+    expect(expectLastCall()).once();
 
-    final ServiceComponentHost sch2 = createNiceMock(ServiceComponentHost.class);
+    final ServiceComponentHost sch2 = createMock(ServiceComponentHost.class);
     expect(sch2.getServiceName()).andReturn("SERVICE2").once();
     expect(sch2.getServiceComponentName()).andReturn("COMPONENT2").once();
     expect(sch2.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
+    expect(sch2.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
+    expect(sch2.getStackVersion()).andReturn(stackVersion).anyTimes();
+    expect(sch2.getHostName()).andReturn("host1").anyTimes();
+
+    sch2.setDesiredSecurityState(SecurityState.SECURED_KERBEROS);
+    expect(expectLastCall()).once();
+    sch2.setSecurityState(SecurityState.SECURING);
+    expect(expectLastCall()).once();
 
     final Host host = createNiceMock(Host.class);
     expect(host.getHostName()).andReturn("host1").once();
@@ -577,6 +600,246 @@ public class KerberosHelperTest {
     verifyAll();
   }
 
+  private void testDisableKerberos(final KerberosCredential kerberosCredential,
+                                   boolean getClusterDescriptor,
+                                   boolean getStackDescriptor) throws Exception {
+
+    KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
+
+    final StackId stackVersion = createNiceMock(StackId.class);
+
+    final ServiceComponentHost sch1 = createMock(ServiceComponentHost.class);
+    expect(sch1.getServiceName()).andReturn("SERVICE1").times(2);
+    expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").once();
+    expect(sch1.getSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
+    expect(sch1.getDesiredSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
+    expect(sch1.getStackVersion()).andReturn(stackVersion).anyTimes();
+
+    sch1.setDesiredSecurityState(SecurityState.UNSECURED);
+    expect(expectLastCall()).once();
+    sch1.setSecurityState(SecurityState.UNSECURING);
+    expect(expectLastCall()).once();
+
+    final ServiceComponentHost sch2 = createMock(ServiceComponentHost.class);
+    expect(sch2.getServiceName()).andReturn("SERVICE2").times(2);
+    expect(sch2.getServiceComponentName()).andReturn("COMPONENT2").once();
+    expect(sch2.getSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
+    expect(sch2.getDesiredSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
+    expect(sch2.getStackVersion()).andReturn(stackVersion).anyTimes();
+
+    sch2.setDesiredSecurityState(SecurityState.UNSECURED);
+    expect(expectLastCall()).once();
+    sch2.setSecurityState(SecurityState.UNSECURING);
+    expect(expectLastCall()).once();
+
+    final Host host = createNiceMock(Host.class);
+    expect(host.getHostName()).andReturn("host1").once();
+    expect(host.getState()).andReturn(HostState.HEALTHY).once();
+
+    final Service service1 = createStrictMock(Service.class);
+    expect(service1.getName()).andReturn("SERVICE1").anyTimes();
+    expect(service1.getServiceComponents())
+        .andReturn(Collections.<String, ServiceComponent>emptyMap())
+        .once();
+    service1.setSecurityState(SecurityState.UNSECURED);
+    expectLastCall().once();
+
+    final Service service2 = createStrictMock(Service.class);
+    expect(service2.getName()).andReturn("SERVICE2").anyTimes();
+    expect(service2.getServiceComponents())
+        .andReturn(Collections.<String, ServiceComponent>emptyMap())
+        .once();
+    service2.setSecurityState(SecurityState.UNSECURED);
+    expectLastCall().once();
+
+    final Map<String, String> kerberosEnvProperties = createNiceMock(Map.class);
+    // TODO: (rlevas) Add when AMBARI 9121 is complete
+    // expect(kerberosEnvProperties.get("kdc_type")).andReturn("mit-kdc").once();
+
+    final Config kerberosEnvConfig = createNiceMock(Config.class);
+    expect(kerberosEnvConfig.getProperties()).andReturn(kerberosEnvProperties).once();
+
+    final Map<String, String> krb5ConfProperties = createNiceMock(Map.class);
+    expect(krb5ConfProperties.get("kdc_type")).andReturn("mit-kdc").once();
+    expect(krb5ConfProperties.get("realm")).andReturn("FOOBAR.COM").once();
+
+    final Config krb5ConfConfig = createNiceMock(Config.class);
+    // TODO: (rlevas) Remove when AMBARI 9121 is complete
+    expect(krb5ConfConfig.getProperties()).andReturn(krb5ConfProperties).once();
+
+    final MaintenanceStateHelper maintenanceStateHelper = injector.getInstance(MaintenanceStateHelper.class);
+    expect(maintenanceStateHelper.getEffectiveState(anyObject(ServiceComponentHost.class)))
+        .andReturn(MaintenanceState.OFF).anyTimes();
+
+    final Cluster cluster = createNiceMock(Cluster.class);
+    expect(cluster.getSecurityType()).andReturn(SecurityType.NONE).once();
+    expect(cluster.getDesiredConfigByType("krb5-conf")).andReturn(krb5ConfConfig).once();
+    expect(cluster.getDesiredConfigByType("kerberos-env")).andReturn(kerberosEnvConfig).once();
+    expect(cluster.getClusterName()).andReturn("c1").anyTimes();
+    expect(cluster.getServices())
+        .andReturn(new HashMap<String, Service>() {
+          {
+            put("SERVICE1", service1);
+            put("SERVICE2", service2);
+          }
+        })
+        .anyTimes();
+    expect(cluster.getServiceComponentHosts("host1"))
+        .andReturn(new ArrayList<ServiceComponentHost>() {
+          {
+            add(sch1);
+            add(sch2);
+          }
+        })
+        .once();
+    expect(cluster.getCurrentStackVersion())
+        .andReturn(new StackId("HDP", "2.2"))
+        .anyTimes();
+    expect(cluster.getSessionAttributes()).andReturn(new HashMap<String, Object>()
{{
+      if (kerberosCredential != null) {
+        put("kerberos_admin/" + KerberosCredential.KEY_NAME_PRINCIPAL, kerberosCredential.getPrincipal());
+        put("kerberos_admin/" + KerberosCredential.KEY_NAME_PASSWORD, kerberosCredential.getPassword());
+        put("kerberos_admin/" + KerberosCredential.KEY_NAME_KEYTAB, kerberosCredential.getKeytab());
+      }
+    }}).anyTimes();
+
+    final Clusters clusters = injector.getInstance(Clusters.class);
+    expect(clusters.getHostsForCluster("c1"))
+        .andReturn(new HashMap<String, Host>() {
+          {
+            put("host1", host);
+          }
+        })
+        .once();
+
+    final AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, "host1"))
+        .andReturn(Collections.<String, Map<String, String>>emptyMap())
+        .once();
+    expect(ambariManagementController.getRoleCommandOrder(cluster))
+        .andReturn(createNiceMock(RoleCommandOrder.class))
+        .once();
+
+    final ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+    expect(configHelper.getEffectiveConfigProperties(anyObject(Cluster.class), anyObject(Map.class)))
+        .andReturn(new HashMap<String, Map<String, String>>() {
+          {
+            put("cluster-env", new HashMap<String, String>() {{
+              put("kerberos_domain", "FOOBAR.COM");
+            }});
+          }
+        })
+        .once();
+    expect(configHelper.getEffectiveConfigAttributes(anyObject(Cluster.class), anyObject(Map.class)))
+        .andReturn(Collections.<String, Map<String, Map<String, String>>>emptyMap())
+        .once();
+
+    final KerberosPrincipalDescriptor principalDescriptor1 = createNiceMock(KerberosPrincipalDescriptor.class);
+    expect(principalDescriptor1.getValue()).andReturn("component1/_HOST@${realm}").once();
+    expect(principalDescriptor1.getType()).andReturn(KerberosPrincipalType.SERVICE).once();
+    expect(principalDescriptor1.getConfiguration()).andReturn("service1-site/component1.kerberos.principal").once();
+
+    final KerberosPrincipalDescriptor principalDescriptor2 = createNiceMock(KerberosPrincipalDescriptor.class);
+    expect(principalDescriptor2.getValue()).andReturn("component2/${host}@${realm}").once();
+    expect(principalDescriptor2.getType()).andReturn(KerberosPrincipalType.SERVICE).once();
+    expect(principalDescriptor2.getConfiguration()).andReturn("service2-site/component2.kerberos.principal").once();
+
+    final KerberosKeytabDescriptor keytabDescriptor1 = createNiceMock(KerberosKeytabDescriptor.class);
+    expect(keytabDescriptor1.getFile()).andReturn("${keytab_dir}/service1.keytab").once();
+    expect(keytabDescriptor1.getOwnerName()).andReturn("service1").once();
+    expect(keytabDescriptor1.getOwnerAccess()).andReturn("rw").once();
+    expect(keytabDescriptor1.getGroupName()).andReturn("hadoop").once();
+    expect(keytabDescriptor1.getGroupAccess()).andReturn("").once();
+    expect(keytabDescriptor1.getConfiguration()).andReturn("service1-site/component1.keytab.file").once();
+
+    final KerberosKeytabDescriptor keytabDescriptor2 = createNiceMock(KerberosKeytabDescriptor.class);
+    expect(keytabDescriptor2.getFile()).andReturn("${keytab_dir}/service2.keytab").once();
+    expect(keytabDescriptor2.getOwnerName()).andReturn("service2").once();
+    expect(keytabDescriptor2.getOwnerAccess()).andReturn("rw").once();
+    expect(keytabDescriptor2.getGroupName()).andReturn("hadoop").once();
+    expect(keytabDescriptor2.getGroupAccess()).andReturn("").once();
+    expect(keytabDescriptor2.getConfiguration()).andReturn("service2-site/component2.keytab.file").once();
+
+    final KerberosIdentityDescriptor identityDescriptor1 = createNiceMock(KerberosIdentityDescriptor.class);
+    expect(identityDescriptor1.getPrincipalDescriptor()).andReturn(principalDescriptor1).once();
+    expect(identityDescriptor1.getKeytabDescriptor()).andReturn(keytabDescriptor1).once();
+
+    final KerberosIdentityDescriptor identityDescriptor2 = createNiceMock(KerberosIdentityDescriptor.class);
+    expect(identityDescriptor2.getPrincipalDescriptor()).andReturn(principalDescriptor2).once();
+    expect(identityDescriptor2.getKeytabDescriptor()).andReturn(keytabDescriptor2).once();
+
+    final KerberosComponentDescriptor componentDescriptor1 = createNiceMock(KerberosComponentDescriptor.class);
+    expect(componentDescriptor1.getIdentities(true)).
+        andReturn(new ArrayList<KerberosIdentityDescriptor>() {{
+          add(identityDescriptor1);
+        }}).once();
+
+    final KerberosComponentDescriptor componentDescriptor2 = createNiceMock(KerberosComponentDescriptor.class);
+    expect(componentDescriptor2.getIdentities(true)).
+        andReturn(new ArrayList<KerberosIdentityDescriptor>() {{
+          add(identityDescriptor2);
+        }}).once();
+
+    final KerberosServiceDescriptor serviceDescriptor1 = createNiceMock(KerberosServiceDescriptor.class);
+    expect(serviceDescriptor1.getComponent("COMPONENT1")).andReturn(componentDescriptor1).once();
+
+    final KerberosServiceDescriptor serviceDescriptor2 = createNiceMock(KerberosServiceDescriptor.class);
+    expect(serviceDescriptor2.getComponent("COMPONENT2")).andReturn(componentDescriptor2).once();
+
+    final KerberosDescriptor kerberosDescriptor = createNiceMock(KerberosDescriptor.class);
+    expect(kerberosDescriptor.getService("SERVICE1")).andReturn(serviceDescriptor1).once();
+    expect(kerberosDescriptor.getService("SERVICE2")).andReturn(serviceDescriptor2).once();
+
+    //todo: extract method?
+    if (getClusterDescriptor) {
+      // needed to mock the static method fromJson()
+      setupGetDescriptorFromCluster(kerberosDescriptor);
+    } else if (getStackDescriptor) {
+      setupGetDescriptorFromStack(kerberosDescriptor);
+    }
+    final StageFactory stageFactory = injector.getInstance(StageFactory.class);
+    expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
+        anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class),
+        anyObject(String.class)))
+        .andAnswer(new IAnswer<Stage>() {
+          @Override
+          public Stage answer() throws Throwable {
+            Stage stage = createNiceMock(Stage.class);
+
+            expect(stage.getHostRoleCommands())
+                .andReturn(Collections.<String, Map<String, HostRoleCommand>>emptyMap())
+                .anyTimes();
+            replay(stage);
+            return stage;
+          }
+        })
+        .anyTimes();
+
+    // This is a STRICT mock to help ensure that the end result is what we want.
+    final RequestStageContainer requestStageContainer = createStrictMock(RequestStageContainer.class);
+    // Update Configs Stage
+    expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
+    expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.addStages(anyObject(List.class));
+    expectLastCall().once();
+    // TODO: Add more of these when more stages are added.
+    // Clean-up/Finalize Stage
+    expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
+    expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.addStages(anyObject(List.class));
+    expectLastCall().once();
+
+    replayAll();
+
+    // Needed by infrastructure
+    metaInfo.init();
+
+    kerberosHelper.toggleKerberos(cluster, SecurityType.NONE, !(getClusterDescriptor || getStackDescriptor)
?
+        kerberosDescriptor : null, requestStageContainer);
+
+    verifyAll();
+  }
+
   private void testRegenerateKeytabs(final KerberosCredential kerberosCredential,
                                      boolean getClusterDescriptor,
                                      boolean getStackDescriptor) throws Exception {


Mime
View raw message