ambari-commits mailing list archives

From nc...@apache.org
Subject [3/8] ambari git commit: AMBARI-14674 - Cannot Finalize Downgrade Due To Detached ClusterEntity (jonathanhurley)
Date Sat, 16 Jan 2016 14:47:39 GMT
AMBARI-14674 - Cannot Finalize Downgrade Due To Detached ClusterEntity (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eaf27bfd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eaf27bfd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eaf27bfd

Branch: refs/heads/branch-dev-patch-upgrade
Commit: eaf27bfd65b2ca4966476b3871cde73805570c16
Parents: fb62837
Author: Jonathan Hurley <jhurley@hortonworks.com>
Authored: Thu Jan 14 15:25:32 2016 -0500
Committer: Jonathan Hurley <jhurley@hortonworks.com>
Committed: Fri Jan 15 15:00:21 2016 -0500

----------------------------------------------------------------------
 .../ambari/server/orm/dao/ClusterDAO.java       |  6 +-
 .../server/state/cluster/ClusterImpl.java       | 67 +++++++++-------
 .../server/upgrade/UpgradeCatalog170.java       | 58 +++++++-------
 .../server/state/cluster/ClusterTest.java       | 84 ++++++++++++++++++++
 4 files changed, 154 insertions(+), 61 deletions(-)
----------------------------------------------------------------------
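
Background for the changes below: javax.persistence.EntityManager#merge() copies the state of the entity it is given into a managed instance and returns that copy; the argument itself stays detached. Code that keeps mutating the original reference after merging can therefore end up working against a detached ClusterEntity, which is the failure the commit title describes during downgrade finalization, and why this patch reassigns the result of each merge() call. A minimal, self-contained sketch of that contract (SimpleEntity and the "example-unit" persistence unit are illustrative, not part of Ambari):

import javax.persistence.Entity;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Id;
import javax.persistence.Persistence;

@Entity
class SimpleEntity {
  @Id
  Long id;
  String name;
}

public class MergeReturnValueSketch {
  public static void main(String[] args) {
    // "example-unit" is a hypothetical persistence unit name.
    EntityManagerFactory emf = Persistence.createEntityManagerFactory("example-unit");
    EntityManager em = emf.createEntityManager();

    SimpleEntity detached = new SimpleEntity();
    detached.id = 1L;
    detached.name = "before";

    em.getTransaction().begin();

    // merge() does NOT attach 'detached'; it returns a separate managed copy.
    // Changes made to 'detached' after this point are invisible to JPA.
    SimpleEntity managed = em.merge(detached);

    // The pattern applied throughout this commit: keep the returned instance.
    managed.name = "after";

    em.getTransaction().commit();
    em.close();
    emf.close();
  }
}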


http://git-wip-us.apache.org/repos/asf/ambari/blob/eaf27bfd/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index 22bd8bb..1c0e38a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -19,12 +19,10 @@
 package org.apache.ambari.server.orm.dao;
 
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 
 import javax.persistence.EntityManager;
 import javax.persistence.NoResultException;
-import javax.persistence.Query;
 import javax.persistence.TypedQuery;
 import javax.persistence.criteria.CriteriaBuilder;
 import javax.persistence.criteria.CriteriaQuery;
@@ -281,8 +279,8 @@ public class ClusterDAO {
    * Update config mapping in DB
    */
   @Transactional
-  public void mergeConfigMapping(ClusterConfigMappingEntity mappingEntity) {
-    entityManagerProvider.get().merge(mappingEntity);
+  public ClusterConfigMappingEntity mergeConfigMapping(ClusterConfigMappingEntity mappingEntity) {
+    return entityManagerProvider.get().merge(mappingEntity);
   }
 
   /**

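The ClusterDAO change above makes the @Transactional merge wrapper hand the managed copy from EntityManager.merge() back to its callers. A caller-side sketch of the pattern this enables (the class below is illustrative and not from the Ambari source; only the reassignment idiom comes from this commit):

import java.util.Collection;

import org.apache.ambari.server.orm.dao.ClusterDAO;
import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;

/** Illustrative caller, not part of the Ambari codebase. */
class DeselectMappingsSketch {
  private final ClusterDAO clusterDAO;

  DeselectMappingsSketch(ClusterDAO clusterDAO) {
    this.clusterDAO = clusterDAO;
  }

  /** Deselect every mapping of the given type, keeping the managed copy merge() returns. */
  void deselect(Collection<ClusterConfigMappingEntity> mappings, String type) {
    for (ClusterConfigMappingEntity entity : mappings) {
      if (entity.isSelected() > 0 && entity.getType().equals(type)) {
        entity.setSelected(0);
        // The argument stays detached; reassign so any later use of 'entity'
        // in this iteration sees the managed instance.
        entity = clusterDAO.mergeConfigMapping(entity);
      }
    }
  }
}
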
http://git-wip-us.apache.org/repos/asf/ambari/blob/eaf27bfd/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 3938e31..14223fe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -653,7 +653,7 @@ public class ClusterImpl implements Cluster {
         clusterEntity.setClusterName(clusterName);
 
         // RollbackException possibility if UNIQUE constraint violated
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
         clusters.updateClusterName(oldName, clusterName);
         this.clusterName = clusterName;
       }
@@ -1010,7 +1010,7 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       if (clusterEntity != null) {
         clusterEntity.setDesiredStack(stackEntity);
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
 
         if (cascade) {
           for (Service service : getServices().values()) {
@@ -1078,7 +1078,7 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       if (clusterEntity != null) {
         clusterEntity.setProvisioningState(provisioningState);
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
       }
     } finally {
       clusterGlobalLock.writeLock().unlock();
@@ -1112,7 +1112,7 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       if (clusterEntity != null) {
         clusterEntity.setSecurityType(securityType);
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
       }
     } finally {
       clusterGlobalLock.writeLock().unlock();
@@ -1202,7 +1202,7 @@ public class ClusterImpl implements Cluster {
             HostVersionEntity hostVersionEntity = existingHostToHostVersionEntity.get(hostname);
             if (hostVersionEntity.getState() != desiredState) {
               hostVersionEntity.setState(desiredState);
-              hostVersionDAO.merge(hostVersionEntity);
+              hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
             }
 
           // Maintain the invariant that only one HostVersionEntity is allowed
@@ -1213,7 +1213,7 @@ public class ClusterImpl implements Cluster {
               && desiredState == RepositoryVersionState.CURRENT
              && currentHostVersionEntity.getState() == RepositoryVersionState.CURRENT) {
             currentHostVersionEntity.setState(RepositoryVersionState.INSTALLED);
-            hostVersionDAO.merge(currentHostVersionEntity);
+            currentHostVersionEntity = hostVersionDAO.merge(currentHostVersionEntity);
           }
         }
       }
@@ -1312,7 +1312,7 @@ public class ClusterImpl implements Cluster {
         // Update existing host stack version
         HostVersionEntity hostVersionEntity = existingHostStackVersions.get(hostname);
         hostVersionEntity.setState(repositoryVersionState);
-        hostVersionDAO.merge(hostVersionEntity);
+        hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
       }
     }
   }
@@ -1577,7 +1577,7 @@ public class ClusterImpl implements Cluster {
        // Alternatively, transition to CURRENT during initial bootstrap if at least one host component advertised a version
        if (hostSummary.isUpgradeFinished() && hostVersionEntity.getState().equals(RepositoryVersionState.UPGRADING) || performingInitialBootstrap) {
           hostVersionEntity.setState(RepositoryVersionState.CURRENT);
-          hostVersionDAO.merge(hostVersionEntity);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
         }
       } else {
         // Handle transitions during a Stack Upgrade
@@ -1586,12 +1586,12 @@ public class ClusterImpl implements Cluster {
         // INSTALLED->UPGRADING->UPGRADED in one shot.
        if (hostSummary.isUpgradeInProgress(currentVersionEntity.getRepositoryVersion().getVersion()) && hostVersionEntity.getState().equals(RepositoryVersionState.INSTALLED)) {
           hostVersionEntity.setState(RepositoryVersionState.UPGRADING);
-          hostVersionDAO.merge(hostVersionEntity);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
         }
 
        if (hostSummary.isUpgradeFinished() && hostVersionEntity.getState().equals(RepositoryVersionState.UPGRADING)) {
           hostVersionEntity.setState(RepositoryVersionState.UPGRADED);
-          hostVersionDAO.merge(hostVersionEntity);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
         }
       }
     } finally {
@@ -1673,7 +1673,7 @@ public class ClusterImpl implements Cluster {
       System.currentTimeMillis(), System.currentTimeMillis(), userName);
     clusterVersionDAO.create(clusterVersionEntity);
     clusterEntity.getClusterVersionEntities().add(clusterVersionEntity);
-    clusterDAO.merge(clusterEntity);
+    clusterEntity = clusterDAO.merge(clusterEntity);
   }
 
   /**
@@ -1769,13 +1769,13 @@ public class ClusterImpl implements Cluster {
           ClusterVersionEntity currentVersion = clusterVersionDAO.findByClusterAndStateCurrent(getClusterName());
           if (currentVersion != null) {
             currentVersion.setState(RepositoryVersionState.INSTALLED);
-            clusterVersionDAO.merge(currentVersion);
+            currentVersion = clusterVersionDAO.merge(currentVersion);
           }
         }
 
         existingClusterVersion.setState(state);
         existingClusterVersion.setEndTime(System.currentTimeMillis());
-        clusterVersionDAO.merge(existingClusterVersion);
+        existingClusterVersion = clusterVersionDAO.merge(existingClusterVersion);
 
         if (state == RepositoryVersionState.CURRENT) {
           for (HostEntity hostEntity : clusterEntity.getHostEntities()) {
@@ -1794,10 +1794,10 @@ public class ClusterImpl implements Cluster {
                   existingClusterVersion.getRepositoryVersion().getId())) {
                   target = entity;
                   target.setState(state);
-                  hostVersionDAO.merge(target);
+                  target = hostVersionDAO.merge(target);
                 } else if (entity.getState() == RepositoryVersionState.CURRENT) {
                   entity.setState(RepositoryVersionState.INSTALLED);
-                  hostVersionDAO.merge(entity);
+                  entity = hostVersionDAO.merge(entity);
                 }
               }
             }
@@ -1887,11 +1887,11 @@ public class ClusterImpl implements Cluster {
           clusterStateDAO.create(clusterStateEntity);
           clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
           clusterEntity.setClusterStateEntity(clusterStateEntity);
-          clusterDAO.merge(clusterEntity);
+          clusterEntity = clusterDAO.merge(clusterEntity);
         } else {
           clusterStateEntity.setCurrentStack(stackEntity);
-          clusterStateDAO.merge(clusterStateEntity);
-          clusterDAO.merge(clusterEntity);
+          clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
+          clusterEntity = clusterDAO.merge(clusterEntity);
         }
       }
     } catch (RollbackException e) {
@@ -2353,7 +2353,7 @@ public class ClusterImpl implements Cluster {
         serviceConfigDAO.create(serviceConfigEntity);
         if (configGroup != null) {
           serviceConfigEntity.setHostIds(new ArrayList<Long>(configGroup.getHosts().keySet()));
-          serviceConfigDAO.merge(serviceConfigEntity);
+          serviceConfigEntity = serviceConfigDAO.merge(serviceConfigEntity);
         }
       }
     } finally {
@@ -2566,7 +2566,7 @@ public class ClusterImpl implements Cluster {
       for (ClusterConfigMappingEntity entity : mappingEntities) {
        if (configTypes.contains(entity.getType()) && entity.isSelected() > 0) {
           entity.setSelected(0);
-          clusterDAO.mergeConfigMapping(entity);
+          entity = clusterDAO.mergeConfigMapping(entity);
         }
       }
 
@@ -2633,7 +2633,7 @@ public class ClusterImpl implements Cluster {
     for (ClusterConfigMappingEntity e : entities) {
       if (e.isSelected() > 0 && e.getType().equals(type)) {
         e.setSelected(0);
-        clusterDAO.mergeConfigMapping(e);
+        e = clusterDAO.mergeConfigMapping(e);
       }
     }
 
@@ -3159,7 +3159,7 @@ public class ClusterImpl implements Cluster {
       }
 
       clusterEntity.setConfigMappingEntities(configMappingEntities);
-      clusterDAO.merge(clusterEntity);
+      clusterEntity = clusterDAO.merge(clusterEntity);
       clusterDAO.mergeConfigMappings(configMappingEntities);
 
       cacheConfigurations();
@@ -3202,10 +3202,20 @@ public class ClusterImpl implements Cluster {
     return new HashMap<>();
   }
 
-  // The caller should make sure global write lock is acquired.
+  /**
+   * Removes all configurations associated with the specified stack. The caller
+   * should make sure the cluster global write lock is acquired.
+   *
+   * @param stackId
+   * @see Cluster#getClusterGlobalLock()
+   */
   @Transactional
   void removeAllConfigsForStack(StackId stackId) {
     ClusterEntity clusterEntity = getClusterEntity();
+
+    // make sure the entity isn't stale in the current unit of work.
+    clusterDAO.refresh(clusterEntity);
+
     long clusterId = clusterEntity.getClusterId();
 
     // this will keep track of cluster config mappings that need removal
@@ -3226,6 +3236,7 @@ public class ClusterImpl implements Cluster {
         clusterDAO.removeConfig(configEntity);
         removedClusterConfigs.add(configEntity);
       }
+
       serviceConfig.getClusterConfigEntities().clear();
       serviceConfigDAO.remove(serviceConfig);
       serviceConfigEntities.remove(serviceConfig);
@@ -3242,7 +3253,8 @@ public class ClusterImpl implements Cluster {
       removedClusterConfigs.add(clusterConfig);
     }
 
-    clusterDAO.merge(clusterEntity);
+    clusterEntity.setClusterConfigEntities(clusterConfigEntities);
+    clusterEntity = clusterDAO.merge(clusterEntity);
 
     // remove config mappings
     Collection<ClusterConfigMappingEntity> configMappingEntities =
@@ -3266,7 +3278,8 @@ public class ClusterImpl implements Cluster {
       }
     }
 
-    clusterDAO.merge(clusterEntity);
+    clusterEntity.setConfigMappingEntities(configMappingEntities);
+    clusterEntity = clusterDAO.merge(clusterEntity);
   }
 
   /**
@@ -3276,10 +3289,6 @@ public class ClusterImpl implements Cluster {
   public void removeConfigurations(StackId stackId) {
     clusterGlobalLock.writeLock().lock();
     try {
-      // make sure the entity isn't stale in the current unit of work.
-      ClusterEntity clusterEntity = getClusterEntity();
-      clusterDAO.refresh(clusterEntity);
-
       removeAllConfigsForStack(stackId);
       cacheConfigurations();
     } finally {

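The removeAllConfigsForStack() change above moves the clusterDAO.refresh() call inside the @Transactional method and reassigns each merge() result, so the entity being pruned is managed and current within the same unit of work that removes its configs. A rough sketch of that shape against plain JPA with Guice's persist support (the class, method, and find-by-id lookup are illustrative, not the actual ClusterImpl code):

import javax.persistence.EntityManager;

import org.apache.ambari.server.orm.entities.ClusterEntity;

import com.google.inject.Provider;
import com.google.inject.persist.Transactional;

/** Illustrative helper, not part of the Ambari codebase. */
class StackConfigCleanupSketch {
  private final Provider<EntityManager> entityManagerProvider;

  StackConfigCleanupSketch(Provider<EntityManager> entityManagerProvider) {
    this.entityManagerProvider = entityManagerProvider;
  }

  @Transactional
  void removeAllConfigs(long clusterId) {
    EntityManager em = entityManagerProvider.get();

    // Look the entity up inside this transaction so it is managed here, then
    // refresh it so its state and lazy collections are not stale leftovers
    // from an earlier unit of work.
    ClusterEntity clusterEntity = em.find(ClusterEntity.class, clusterId);
    em.refresh(clusterEntity);

    // Mutate collections on the managed entity, then keep the instance that
    // merge() returns instead of the local reference that preceded it.
    clusterEntity.getClusterConfigEntities().clear();
    clusterEntity = em.merge(clusterEntity);
  }
}
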
http://git-wip-us.apache.org/repos/asf/ambari/blob/eaf27bfd/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
index 3d00c29..91de82a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
@@ -18,9 +18,32 @@
 
 package org.apache.ambari.server.upgrade;
 
-import com.google.common.reflect.TypeToken;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
+import java.lang.reflect.Type;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.configuration.Configuration.DatabaseType;
@@ -83,30 +106,9 @@ import org.apache.ambari.server.view.ViewRegistry;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-import javax.persistence.criteria.CriteriaBuilder;
-import javax.persistence.criteria.CriteriaQuery;
-import javax.persistence.criteria.Expression;
-import javax.persistence.criteria.Predicate;
-import javax.persistence.criteria.Root;
-import java.lang.reflect.Type;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
+import com.google.common.reflect.TypeToken;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
 
 /**
  * Upgrade catalog for version 1.7.0.
@@ -715,7 +717,7 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
      for (ClusterConfigMappingEntity configMapping : cluster.getConfigMappingEntities()) {
         if (configMapping.getType().equals(Configuration.MAPREDUCE2_LOG4J_CONFIG_TAG)) {
           configMapping.setSelected(0);
-          clusterDAO.mergeConfigMapping(configMapping);
+          configMapping = clusterDAO.mergeConfigMapping(configMapping);
         }
       }
       clusterDAO.merge(cluster);

http://git-wip-us.apache.org/repos/asf/ambari/blob/eaf27bfd/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index d2ba396..2c6b0c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -2370,6 +2370,90 @@ public class ClusterTest {
         Assert.assertEquals(1, clusterConfigMapping.isSelected());
       }
     }
+  }
+
+  /**
+   * Tests removing configurations and configuration mappings by stack.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRemoveConfigurations() throws Exception {
+    createDefaultCluster();
+    Cluster cluster = clusters.getCluster("c1");
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    StackId stackId = cluster.getCurrentStackVersion();
+    StackId newStackId = new StackId("HDP-2.0.6");
+
+    StackEntity currentStack = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+    StackEntity newStack = stackDAO.find(newStackId.getStackName(), newStackId.getStackVersion());
+
+    Assert.assertFalse(stackId.equals(newStackId));
+
+    String configType = "foo-type";
+
+    ClusterConfigEntity clusterConfig = new ClusterConfigEntity();
+    clusterConfig.setClusterEntity(clusterEntity);
+    clusterConfig.setConfigId(1L);
+    clusterConfig.setStack(currentStack);
+    clusterConfig.setTag("version-1");
+    clusterConfig.setData("{}");
+    clusterConfig.setType(configType);
+    clusterConfig.setTimestamp(1L);
+    clusterConfig.setVersion(1L);
+
+    clusterDAO.createConfig(clusterConfig);
+    clusterEntity.getClusterConfigEntities().add(clusterConfig);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    ClusterConfigEntity newClusterConfig = new ClusterConfigEntity();
+    newClusterConfig.setClusterEntity(clusterEntity);
+    newClusterConfig.setConfigId(2L);
+    newClusterConfig.setStack(newStack);
+    newClusterConfig.setTag("version-2");
+    newClusterConfig.setData("{}");
+    newClusterConfig.setType(configType);
+    newClusterConfig.setTimestamp(2L);
+    newClusterConfig.setVersion(2L);
+
+    clusterDAO.createConfig(newClusterConfig);
+    clusterEntity.getClusterConfigEntities().add(newClusterConfig);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // config mapping set to 1
+    ClusterConfigMappingEntity configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(1L);
+    configMapping.setSelected(1);
+    configMapping.setTag("version-1");
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+
+    // new config mapping set to 0
+    ClusterConfigMappingEntity newConfigMapping = new ClusterConfigMappingEntity();
+    newConfigMapping.setClusterEntity(clusterEntity);
+    newConfigMapping.setCreateTimestamp(2L);
+    newConfigMapping.setSelected(0);
+    newConfigMapping.setTag("version-2");
+    newConfigMapping.setType(configType);
+    newConfigMapping.setUser("admin");
+
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterDAO.persistConfigMapping(newConfigMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+    clusterEntity.getConfigMappingEntities().add(newConfigMapping);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // get back the cluster configs for the new stack
+    List<ClusterConfigEntity> clusterConfigs = clusterDAO.getAllConfigurations(
+        cluster.getClusterId(), newStackId);
+
+    Assert.assertEquals(1, clusterConfigs.size());
+
+    // remove the configs
+    cluster.removeConfigurations(newStackId);
 
+    clusterConfigs = clusterDAO.getAllConfigurations(cluster.getClusterId(), newStackId);
+    Assert.assertEquals(0, clusterConfigs.size());
   }
 }

