ambari-commits mailing list archives

From jonathanhur...@apache.org
Subject [16/23] ambari git commit: AMBARI-21658. Check for config group host mapping left behind after host delete. (swagle)
Date Fri, 04 Aug 2017 14:40:50 GMT
AMBARI-21658. Check for config group host mapping left behind after host delete. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/30bea532
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/30bea532
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/30bea532

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 30bea532c6e80d635ba8fb7b12422e8d936e058e
Parents: d72f329
Author: Siddharth Wagle <swagle@hortonworks.com>
Authored: Thu Aug 3 21:04:30 2017 -0700
Committer: Siddharth Wagle <swagle@hortonworks.com>
Committed: Thu Aug 3 21:04:30 2017 -0700

----------------------------------------------------------------------
 .../checks/DatabaseConsistencyCheckHelper.java  | 113 +++++++
 .../controller/utilities/DatabaseChecker.java   | 305 -------------------
 .../state/configgroup/ConfigGroupImpl.java      |   6 +-
 .../DatabaseConsistencyCheckHelperTest.java     |  77 +++++
 .../utilities/DatabaseCheckerTest.java          | 105 -------
 5 files changed, 195 insertions(+), 411 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/30bea532/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index c6239d8..d546169 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -51,16 +51,23 @@ import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
 import org.apache.ambari.server.state.ClientConfigFileDefinition;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.UpgradeState;
+import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -170,6 +177,7 @@ public class DatabaseConsistencyCheckHelper {
       if (fixIssues) {
         fixHostComponentStatesCountEqualsHostComponentsDesiredStates();
         fixClusterConfigsNotMappedToAnyService();
+        fixConfigGroupHostMappings();
       }
       checkSchemaName();
       checkMySQLEngine();
@@ -181,6 +189,7 @@ public class DatabaseConsistencyCheckHelper {
       checkServiceConfigs();
       checkTopologyTables();
       checkForLargeTables();
+      checkConfigGroupHostMapping(true);
       LOG.info("******************************* Check database completed *******************************");
       return checkResult;
     }
@@ -1130,6 +1139,110 @@ public class DatabaseConsistencyCheckHelper {
 
   }
 
+  /**
+   * This method checks if there are any ConfigGroup host mappings with hosts
+   * that are no longer part of the cluster.
+   */
+  static Map<Long, Set<Long>> checkConfigGroupHostMapping(boolean warnIfFound) {
+    LOG.info("Checking config group host mappings");
+    Map<Long, Set<Long>> nonMappedHostIds = new HashMap<>();
+    Clusters clusters = injector.getInstance(Clusters.class);
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+    StringBuilder output = new StringBuilder("[( ConfigGroup, Service, HostCount) => ");
+
+    if (!MapUtils.isEmpty(clusterMap)) {
+      for (Cluster cluster : clusterMap.values()) {
+        Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
+        Map<String, Host> clusterHosts;
+        try {
+          clusterHosts = clusters.getHostsForCluster(cluster.getClusterName());
+        } catch (AmbariException e) {
+          // Should not happen for a cluster returned by getClusters(); skip it
+          continue;
+        }
+
+        if (!MapUtils.isEmpty(configGroups) && !MapUtils.isEmpty(clusterHosts)) {
+          for (ConfigGroup configGroup : configGroups.values()) {
+            // Based on the current implementation of ConfigGroupImpl, the
+            // host mapping is loaded only if the host actually exists
+            // in the host table
+            Map<Long, Host> hosts = configGroup.getHosts();
+            boolean addToOutput = false;
+            if (!MapUtils.isEmpty(hosts)) {
+              for (Host host : hosts.values()) {
+                // Look up by hostname, which has a unique constraint
+                if (!clusterHosts.containsKey(host.getHostName())) {
+                  Set<Long> hostIds = nonMappedHostIds.get(configGroup.getId());
+                  if (CollectionUtils.isEmpty(hostIds)) {
+                    hostIds = new HashSet<>();
+                    nonMappedHostIds.put(configGroup.getId(), hostIds);
+                  }
+                  hostIds.add(host.getHostId());
+                  addToOutput = true;
+                }
+              }
+            }
+            if (addToOutput) {
+              output.append("( ");
+              output.append(configGroup.getName());
+              output.append(", ");
+              output.append(configGroup.getTag());
+              output.append(", ");
+              output.append(nonMappedHostIds.get(configGroup.getId()).size());
+              output.append(" ), ");
+            }
+          }
+        }
+      }
+    }
+    if (!MapUtils.isEmpty(nonMappedHostIds) && warnIfFound) {
+      output.replace(output.lastIndexOf(","), output.length(), "]");
+      warning("You have config group host mappings with hosts that are no " +
+        "longer associated with the cluster, {}. Run --auto-fix-database to " +
+        "fix this automatically. Alternatively, you can remove this mapping " +
+        "from the UI.", output.toString());
+    }
+
+    return nonMappedHostIds;
+  }
+
+  /**
+   * Fix inconsistencies found by {@link #checkConfigGroupHostMapping(boolean)}.
+   */
+  @Transactional
+  static void fixConfigGroupHostMappings() {
+    Map<Long, Set<Long>> nonMappedHostIds = checkConfigGroupHostMapping(false);
+    Clusters clusters = injector.getInstance(Clusters.class);
+
+    if (!MapUtils.isEmpty(nonMappedHostIds)) {
+      LOG.info("Fixing {} config groups with inconsistent host mappings", nonMappedHostIds.size());
+
+      for (Map.Entry<Long, Set<Long>> nonMappedHostEntry : nonMappedHostIds.entrySet()) {
+        if (!MapUtils.isEmpty(clusters.getClusters())) {
+          for (Cluster cluster : clusters.getClusters().values()) {
+            Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
+            if (!MapUtils.isEmpty(configGroups)) {
+              ConfigGroup configGroup = configGroups.get(nonMappedHostEntry.getKey());
+              if (configGroup != null) {
+                for (Long hostId : nonMappedHostEntry.getValue()) {
+                  try {
+                    configGroup.removeHost(hostId);
+                  } catch (AmbariException e) {
+                    LOG.warn("Unable to fix inconsistency by removing host " +
+                      "mapping for config group: {}, service: {}, hostId = {}",
+                      configGroup.getName(), configGroup.getTag(), hostId);
+                  }
+                }
+              } else {
+                LOG.warn("Unable to find config group with id = {}", nonMappedHostEntry.getKey());
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
   private static void ensureConnection() {
     if (connection == null) {
       if (dbAccessor == null) {
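
The new check/fix pair above boils down to: collect the host ids that a config group still references but the cluster no longer contains, then remove those stale mappings. The following is a minimal, self-contained sketch of that idea only; the SketchCluster/SketchConfigGroup types and method names are simplified placeholders, not the real Ambari state API (which resolves cluster hosts by hostname through Clusters and ConfigGroup).

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ConfigGroupMappingSketch {

  // Simplified placeholder for a config group and the host ids it maps.
  static class SketchConfigGroup {
    final long id;
    final String name;
    final Set<Long> mappedHostIds = new HashSet<>();
    SketchConfigGroup(long id, String name) { this.id = id; this.name = name; }
  }

  // Simplified placeholder for a cluster: the hosts still registered plus its config groups.
  static class SketchCluster {
    final Set<Long> liveHostIds = new HashSet<>();
    final Map<Long, SketchConfigGroup> configGroups = new HashMap<>();
  }

  // Same intent as checkConfigGroupHostMapping(): per config group id, report the host ids
  // that are still mapped but no longer part of the cluster.
  static Map<Long, Set<Long>> findOrphanedMappings(SketchCluster cluster) {
    Map<Long, Set<Long>> orphaned = new HashMap<>();
    for (SketchConfigGroup group : cluster.configGroups.values()) {
      for (Long hostId : group.mappedHostIds) {
        if (!cluster.liveHostIds.contains(hostId)) {
          orphaned.computeIfAbsent(group.id, k -> new HashSet<>()).add(hostId);
        }
      }
    }
    return orphaned;
  }

  // Same intent as fixConfigGroupHostMappings(): drop every stale mapping that was found.
  static void removeOrphanedMappings(SketchCluster cluster) {
    for (Map.Entry<Long, Set<Long>> entry : findOrphanedMappings(cluster).entrySet()) {
      SketchConfigGroup group = cluster.configGroups.get(entry.getKey());
      if (group != null) {
        group.mappedHostIds.removeAll(entry.getValue());
      }
    }
  }

  public static void main(String[] args) {
    SketchCluster cluster = new SketchCluster();
    cluster.liveHostIds.add(1L);                        // host 1 is still registered
    SketchConfigGroup cg = new SketchConfigGroup(10L, "cg1");
    cg.mappedHostIds.add(1L);
    cg.mappedHostIds.add(2L);                           // host 2 was deleted from the cluster
    cluster.configGroups.put(cg.id, cg);

    System.out.println(findOrphanedMappings(cluster));  // {10=[2]}
    removeOrphanedMappings(cluster);
    System.out.println(cg.mappedHostIds);               // [1]
  }
}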

http://git-wip-us.apache.org/repos/asf/ambari/blob/30bea532/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
deleted file mode 100644
index d35fc1a..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.utilities;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Scanner;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.MetainfoDAO;
-import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
-import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.ClusterStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.MetainfoEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/*This class should not be used anymore
-* now we will use DatabaseConsistencyChecker*/
-public class DatabaseChecker {
-
-  static Logger LOG = LoggerFactory.getLogger(DatabaseChecker.class);
-
-  @Inject
-  static Injector injector;
-  static AmbariMetaInfo ambariMetaInfo;
-  static MetainfoDAO metainfoDAO;
-
-  public static void checkDBConsistency() throws AmbariException {
-    LOG.info("Checking DB consistency");
-
-    boolean checkPassed = true;
-    if (ambariMetaInfo == null) {
-      ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    }
-
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    List<ClusterEntity> clusters = clusterDAO.findAll();
-    for (ClusterEntity clusterEntity: clusters) {
-      StackId stackId = new StackId(clusterEntity.getDesiredStack());
-
-      Collection<ClusterServiceEntity> serviceEntities =
-        clusterEntity.getClusterServiceEntities();
-      for (ClusterServiceEntity clusterServiceEntity : serviceEntities) {
-
-        ServiceDesiredStateEntity serviceDesiredStateEntity =
-          clusterServiceEntity.getServiceDesiredStateEntity();
-        if (serviceDesiredStateEntity == null) {
-          checkPassed = false;
-          LOG.error(String.format("ServiceDesiredStateEntity is null for " +
-              "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s ",
-            clusterEntity.getClusterName(), clusterServiceEntity.getServiceName()));
-        }
-        Collection<ServiceComponentDesiredStateEntity> scDesiredStateEntities =
-          clusterServiceEntity.getServiceComponentDesiredStateEntities();
-        if (scDesiredStateEntities == null ||
-          scDesiredStateEntities.isEmpty()) {
-          checkPassed = false;
-          LOG.error(String.format("serviceComponentDesiredStateEntities is null or empty
for " +
-              "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s ",
-            clusterEntity.getClusterName(), clusterServiceEntity.getServiceName()));
-        } else {
-          for (ServiceComponentDesiredStateEntity scDesiredStateEnity : scDesiredStateEntities) {
-
-            Collection<HostComponentDesiredStateEntity> schDesiredStateEntities =
-              scDesiredStateEnity.getHostComponentDesiredStateEntities();
-            Collection<HostComponentStateEntity> schStateEntities =
-              scDesiredStateEnity.getHostComponentStateEntities();
-
-            ComponentInfo componentInfo = ambariMetaInfo.getComponent(
-              stackId.getStackName(), stackId.getStackVersion(),
-              scDesiredStateEnity.getServiceName(), scDesiredStateEnity.getComponentName());
-
-            boolean zeroCardinality = componentInfo.getCardinality() == null
-              || componentInfo.getCardinality().startsWith("0")
-              || scDesiredStateEnity.getComponentName().equals("SECONDARY_NAMENODE"); // cardinality 0 for NameNode HA
-
-            boolean componentCheckFailed = false;
-
-            if (schDesiredStateEntities == null) {
-              componentCheckFailed = true;
-              LOG.error(String.format("hostComponentDesiredStateEntities is null for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, componentName=%s
",
-                clusterEntity.getClusterName(), scDesiredStateEnity.getServiceName(), scDesiredStateEnity.getComponentName()));
-            } else if (!zeroCardinality && schDesiredStateEntities.isEmpty()) {
-              componentCheckFailed = true;
-              LOG.error(String.format("hostComponentDesiredStateEntities is empty for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, componentName=%s
",
-                clusterEntity.getClusterName(), scDesiredStateEnity.getServiceName(), scDesiredStateEnity.getComponentName()));
-            }
-
-            if (schStateEntities == null) {
-              componentCheckFailed = true;
-              LOG.error(String.format("hostComponentStateEntities is null for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, componentName=%s
",
-                clusterEntity.getClusterName(), scDesiredStateEnity.getServiceName(), scDesiredStateEnity.getComponentName()));
-            } else if (!zeroCardinality && schStateEntities.isEmpty()) {
-              componentCheckFailed = true;
-              LOG.error(String.format("hostComponentStateEntities is empty for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, componentName=%s
",
-                clusterEntity.getClusterName(), scDesiredStateEnity.getServiceName(), scDesiredStateEnity.getComponentName()));
-            }
-
-            if (!componentCheckFailed &&
-              schDesiredStateEntities.size() != schStateEntities.size()) {
-              checkPassed = false;
-              LOG.error(String.format("HostComponentStateEntities and HostComponentDesiredStateEntities
" +
-                  "tables must contain equal number of rows mapped to ServiceComponentDesiredStateEntity,
" +
-                  "(clusterName=%s, serviceName=%s, componentName=%s) ", clusterEntity.getClusterName(),
-                scDesiredStateEnity.getServiceName(), scDesiredStateEnity.getComponentName()));
-            }
-            checkPassed = checkPassed && !componentCheckFailed;
-          }
-        }
-      }
-    }
-    if (checkPassed) {
-      LOG.info("DB consistency check passed.");
-    } else {
-      String errorMessage = "DB consistency check failed. Run \"ambari-server start --skip-database-validation\"
to skip validation.";
-      LOG.error(errorMessage);
-      throw new AmbariException(errorMessage);
-    }
-  }
-
-  private static boolean clusterConfigsContainTypeAndTag(Collection<ClusterConfigEntity> clusterConfigEntities,
-                                                         String typeName, String tag) {
-    for (ClusterConfigEntity clusterConfigEntity : clusterConfigEntities) {
-      if (typeName.equals(clusterConfigEntity.getType()) && tag.equals(clusterConfigEntity.getTag())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Validates configuration consistency. Checks that every config type from stack is presented in
-   * ClusterConfigMapping. Checks that for each config type exactly one is selected. Checks that ClusterConfig
-   * contains type_names and tags from ClusterConfigMapping.
-   *
-   * @throws AmbariException if check failed
-   */
-  public static void checkDBConfigsConsistency() throws AmbariException {
-    LOG.info("Checking DB configs consistency");
-
-    boolean checkPassed = true;
-
-    if (ambariMetaInfo == null) {
-      ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    }
-
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    List<ClusterEntity> clusters = clusterDAO.findAll();
-    if (clusters != null) {
-      for (ClusterEntity clusterEntity : clusters) {
-        Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
-        Collection<ClusterConfigEntity> clusterConfigEntities = clusterEntity.getClusterConfigEntities();
-
-        if (configMappingEntities != null) {
-          Map<String, Integer> selectedCountForType = new HashMap<>();
-          for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
-            String typeName = clusterConfigMappingEntity.getType();
-            if (clusterConfigMappingEntity.isSelected() > 0) {
-              int selectedCount = selectedCountForType.get(typeName) != null ? selectedCountForType.get(typeName) : 0;
-              selectedCountForType.put(typeName, selectedCount + 1);
-
-              // Check that ClusterConfig contains type_name and tag from ClusterConfigMapping
-              if (!clusterConfigsContainTypeAndTag(clusterConfigEntities, typeName, clusterConfigMappingEntity.getTag())) {
-                checkPassed = false;
-                LOG.error("ClusterConfig does not contain mapping for type_name=" + typeName
+ " tag="
-                    + clusterConfigMappingEntity.getTag());
-              }
-            } else {
-              if (!selectedCountForType.containsKey(typeName)) {
-                selectedCountForType.put(typeName, 0);
-              }
-            }
-          }
-
-          // Check that every config type from stack is presented in ClusterConfigMapping
-          Collection<ClusterServiceEntity> clusterServiceEntities = clusterEntity.getClusterServiceEntities();
-          ClusterStateEntity clusterStateEntity = clusterEntity.getClusterStateEntity();
-          if (clusterStateEntity != null) {
-            StackEntity currentStack = clusterStateEntity.getCurrentStack();
-            StackInfo stack = ambariMetaInfo.getStack(currentStack.getStackName(), currentStack.getStackVersion());
-
-            for (ClusterServiceEntity clusterServiceEntity : clusterServiceEntities) {
-              if (!State.INIT.equals(clusterServiceEntity.getServiceDesiredStateEntity().getDesiredState())) {
-                String serviceName = clusterServiceEntity.getServiceName();
-                ServiceInfo serviceInfo = ambariMetaInfo.getService(stack.getName(), stack.getVersion(), serviceName);
-                for (String configTypeName : serviceInfo.getConfigTypeAttributes().keySet()) {
-                  if (selectedCountForType.get(configTypeName) == null) {
-                    checkPassed = false;
-                    LOG.error("ClusterConfigMapping does not contain mapping for service="
+ serviceName + " type_name="
-                        + configTypeName);
-                  } else {
-                    // Check that for each config type exactly one is selected
-                    if (selectedCountForType.get(configTypeName) == 0) {
-                      checkPassed = false;
-                      LOG.error("ClusterConfigMapping selected count is 0 for service=" +
serviceName + " type_name="
-                          + configTypeName);
-                    } else if (selectedCountForType.get(configTypeName) > 1) {
-                      checkPassed = false;
-                      LOG.error("ClusterConfigMapping selected count is more than 1 for service="
+ serviceName
-                          + " type_name=" + configTypeName);
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-
-    if (checkPassed) {
-      LOG.info("DB configs consistency check passed.");
-    } else {
-      String errorMessage = "DB configs consistency check failed. Run \"ambari-server start
--skip-database-validation\" to skip validation.";
-      LOG.error(errorMessage);
-      throw new AmbariException(errorMessage);
-    }
-  }
-
-  public static void checkDBVersion() throws AmbariException {
-
-    LOG.info("Checking DB store version");
-    if (metainfoDAO == null) {
-      metainfoDAO = injector.getInstance(MetainfoDAO.class);
-    }
-
-    MetainfoEntity schemaVersionEntity = metainfoDAO.findByKey(Configuration.SERVER_VERSION_KEY);
-    String schemaVersion = null;
-
-    if (schemaVersionEntity != null) {
-      schemaVersion = schemaVersionEntity.getMetainfoValue();
-    }
-
-    Configuration conf = injector.getInstance(Configuration.class);
-    File versionFile = new File(conf.getServerVersionFilePath());
-    if (!versionFile.exists()) {
-      throw new AmbariException("Server version file does not exist.");
-    }
-    String serverVersion = null;
-    try (Scanner scanner = new Scanner(versionFile)) {
-      serverVersion = scanner.useDelimiter("\\Z").next();
-
-    } catch (IOException ioe) {
-      throw new AmbariException("Unable to read server version file.");
-    }
-
-    if (schemaVersionEntity == null || VersionUtils.compareVersions(schemaVersion, serverVersion, 3) != 0) {
-      String error = "Current database store version is not compatible with " +
-        "current server version"
-        + ", serverVersion=" + serverVersion
-        + ", schemaVersion=" + schemaVersion;
-      LOG.warn(error);
-      throw new AmbariException(error);
-    }
-
-    LOG.info("DB store version is compatible");
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/30bea532/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 03edcf8..9058a63 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -30,6 +30,7 @@ import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.DuplicateResourceException;
+import org.apache.ambari.server.HostNotFoundException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.controller.internal.ConfigurationResourceProvider;
 import org.apache.ambari.server.logging.LockFactory;
@@ -172,10 +173,13 @@ public class ConfigGroupImpl implements ConfigGroup {
         if (host != null && hostEntity != null) {
           m_hosts.put(hostEntity.getHostId(), host);
         }
-      } catch (Exception e) {
+      } catch (HostNotFoundException e) {
         LOG.warn("Host {} seems to be deleted but Config group {} mapping " +
           "still exists !", hostMappingEntity.getHostname(), configGroupName);
         LOG.debug("Host seems to be deleted but Config group mapping still exists !", e);
+      } catch (Exception ae) {
+        LOG.error("Exception retrieving host mapping for config group {} " +
+          "with id = {}", configGroupEntity.getGroupName(), configGroupEntity.getGroupId());
       }
     }
   }
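
The ConfigGroupImpl change above narrows the catch: a host that was genuinely deleted (HostNotFoundException) is expected after a host delete and only produces a warning, while any other failure during the lookup is logged as an error. Below is a toy sketch of that pattern; the HostLookup interface and MissingHostException are hypothetical stand-ins for the Ambari host lookup, not its actual API.

import java.util.Optional;

public class NarrowedCatchSketch {

  // Hypothetical stand-in for the "host was deleted" failure mode.
  static class MissingHostException extends RuntimeException {
    MissingHostException(String hostname) { super("No host found: " + hostname); }
  }

  // Hypothetical stand-in for the lookup used while loading a config group's host mapping.
  interface HostLookup {
    String find(String hostname);   // throws MissingHostException when the host is gone
  }

  static Optional<String> loadMappedHost(HostLookup lookup, String hostname, String groupName) {
    try {
      return Optional.of(lookup.find(hostname));
    } catch (MissingHostException e) {
      // Expected after a host delete: the mapping row is stale, so warn and skip it.
      System.out.println("WARN: Host " + hostname + " seems to be deleted but config group "
          + groupName + " mapping still exists");
      return Optional.empty();
    } catch (RuntimeException e) {
      // Anything else is unexpected and should be surfaced as an error.
      System.out.println("ERROR: Exception retrieving host mapping for config group "
          + groupName + ": " + e);
      return Optional.empty();
    }
  }

  public static void main(String[] args) {
    HostLookup lookup = hostname -> {
      if ("deleted-host".equals(hostname)) {
        throw new MissingHostException(hostname);
      }
      return hostname;
    };
    System.out.println(loadMappedHost(lookup, "h1", "cg1"));           // Optional[h1]
    System.out.println(loadMappedHost(lookup, "deleted-host", "cg1")); // warns, Optional.empty
  }
}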

http://git-wip-us.apache.org/repos/asf/ambari/blob/30bea532/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
index 9c8eb74..9e85fa2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
@@ -36,6 +36,7 @@ import java.sql.Statement;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.persistence.EntityManager;
 
@@ -43,8 +44,11 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.EasyMockSupport;
 import org.junit.Assert;
@@ -646,4 +650,77 @@ public class DatabaseConsistencyCheckHelperTest {
 
     easyMockSupport.verifyAll();
   }
+
+  @Test
+  public void testConfigGroupHostMappings() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    final DBAccessor mockDBDbAccessor = easyMockSupport.createNiceMock(DBAccessor.class);
+
+    final StackManagerFactory mockStackManagerFactory = easyMockSupport.createNiceMock(StackManagerFactory.class);
+    final EntityManager mockEntityManager = easyMockSupport.createNiceMock(EntityManager.class);
+    final Clusters mockClusters = easyMockSupport.createNiceMock(Clusters.class);
+    final OsFamily mockOSFamily = easyMockSupport.createNiceMock(OsFamily.class);
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+
+        bind(StackManagerFactory.class).toInstance(mockStackManagerFactory);
+        bind(EntityManager.class).toInstance(mockEntityManager);
+        bind(DBAccessor.class).toInstance(mockDBDbAccessor);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(OsFamily.class).toInstance(mockOSFamily);
+      }
+    });
+
+    Map<String, Cluster> clusters = new HashMap<>();
+    Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    clusters.put("c1", cluster);
+    expect(mockClusters.getClusters()).andReturn(clusters).anyTimes();
+
+    Map<Long, ConfigGroup> configGroupMap = new HashMap<>();
+    ConfigGroup cg1 = easyMockSupport.createNiceMock(ConfigGroup.class);
+    ConfigGroup cg2 = easyMockSupport.createNiceMock(ConfigGroup.class);
+    configGroupMap.put(1L, cg1);
+    configGroupMap.put(2L, cg2);
+
+    expect(cluster.getConfigGroups()).andReturn(configGroupMap).anyTimes();
+
+    expect(cluster.getClusterName()).andReturn("c1").anyTimes();
+
+    Map<String, Host> hosts = new HashMap<>();
+    Host h1 = easyMockSupport.createNiceMock(Host.class);
+    Host h2 = easyMockSupport.createNiceMock(Host.class);
+    hosts.put("h1", h1);
+    expect(mockClusters.getHostsForCluster("c1")).andReturn(hosts);
+
+    Map<Long, Host> cgHosts = new HashMap<>();
+    cgHosts.put(1L, h1);
+    cgHosts.put(2L, h2);
+
+    expect(cg1.getHosts()).andReturn(cgHosts);
+
+    expect(h1.getHostName()).andReturn("h1").anyTimes();
+    expect(h2.getHostName()).andReturn("h2").anyTimes();
+    expect(h1.getHostId()).andReturn(1L).anyTimes();
+    expect(h2.getHostId()).andReturn(2L).anyTimes();
+
+    expect(cg1.getId()).andReturn(1L).anyTimes();
+    expect(cg2.getId()).andReturn(2L).anyTimes();
+    expect(cg1.getName()).andReturn("cg1").anyTimes();
+    expect(cg2.getName()).andReturn("cg2").anyTimes();
+
+    DatabaseConsistencyCheckHelper.setInjector(mockInjector);
+
+    easyMockSupport.replayAll();
+
+    Map<Long, Set<Long>> hostIds = DatabaseConsistencyCheckHelper.checkConfigGroupHostMapping(true);
+
+    easyMockSupport.verifyAll();
+
+    Assert.assertNotNull(hostIds);
+    Assert.assertEquals(1, hostIds.size());
+    Assert.assertEquals(1L, hostIds.keySet().iterator().next().longValue());
+    Assert.assertEquals(2L, hostIds.get(1L).iterator().next().longValue());
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/30bea532/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/DatabaseCheckerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/DatabaseCheckerTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/DatabaseCheckerTest.java
deleted file mode 100644
index 3f73657..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/DatabaseCheckerTest.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.utilities;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.fail;
-
-import java.sql.SQLException;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.MetainfoDAO;
-import org.apache.ambari.server.orm.entities.MetainfoEntity;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/*Ignore this test because DatabaseChecker is not used anymore and it will be removed soon*/
-
-public class DatabaseCheckerTest {
-  private static Injector injector;
-
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-  }
-
-  @Before
-  public void setup() throws Exception {
-    injector.injectMembers(this);
-  }
-
-  @After
-  public void teardown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Ignore
-  @Test
-  public void testCheckDBVersion_Valid() throws Exception {
-    MetainfoDAO metainfoDAO =  createMock(MetainfoDAO.class);
-    MetainfoEntity metainfoEntity = new MetainfoEntity();
-    String serverVersion = ambariMetaInfo.getServerVersion();
-    metainfoEntity.setMetainfoName(Configuration.SERVER_VERSION_KEY);
-    metainfoEntity.setMetainfoValue(serverVersion);
-    expect(metainfoDAO.findByKey(Configuration.SERVER_VERSION_KEY)).
-      andReturn(metainfoEntity);
-    replay(metainfoDAO);
-    DatabaseChecker.metainfoDAO = metainfoDAO;
-    DatabaseChecker.ambariMetaInfo = ambariMetaInfo;
-    try {
-      DatabaseChecker.checkDBVersion();
-    } catch (AmbariException ae) {
-      fail("DB versions check failed.");
-    }
-  }
-
-  @Ignore
-  @Test(expected = AmbariException.class)
-  public void testCheckDBVersionInvalid() throws Exception {
-    MetainfoDAO metainfoDAO =  createMock(MetainfoDAO.class);
-    MetainfoEntity metainfoEntity = new MetainfoEntity();
-    metainfoEntity.setMetainfoName(Configuration.SERVER_VERSION_KEY);
-    metainfoEntity.setMetainfoValue("0.0.0"); // Incompatible version
-    expect(metainfoDAO.findByKey(Configuration.SERVER_VERSION_KEY)).
-      andReturn(metainfoEntity);
-    replay(metainfoDAO);
-    DatabaseChecker.metainfoDAO = metainfoDAO;
-    DatabaseChecker.ambariMetaInfo = ambariMetaInfo;
-
-    DatabaseChecker.checkDBVersion();
-  }
-}

