ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mpapirkovs...@apache.org
Subject [3/3] git commit: AMBARI-5025. Add unit tests for java upgrade. (mpapirkovskyy)
Date Tue, 11 Mar 2014 17:38:25 GMT
AMBARI-5025. Add unit tests for java upgrade. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e37c2b57
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e37c2b57
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e37c2b57

Branch: refs/heads/trunk
Commit: e37c2b5763f413e4ec1dc9fc12f4de70f8ca7e8a
Parents: 29ccfa7
Author: Myroslav Papirkovskyy <mpapyrkovskyy@hortonworks.com>
Authored: Tue Mar 11 19:37:04 2014 +0200
Committer: Myroslav Papirkovskyy <mpapyrkovskyy@hortonworks.com>
Committed: Tue Mar 11 19:38:18 2014 +0200

----------------------------------------------------------------------
 .../server/configuration/Configuration.java     |   7 +-
 .../ambari/server/orm/DBAccessorImpl.java       |   2 +
 .../server/orm/dao/ClusterServiceDAO.java       |   8 +
 .../ambari/server/orm/dao/ClusterStateDAO.java  |   8 +
 .../orm/dao/ConfigGroupConfigMappingDAO.java    |   5 +
 .../orm/dao/ConfigGroupHostMappingDAO.java      |   5 +
 .../apache/ambari/server/orm/dao/DaoUtils.java  |  17 ++
 .../server/orm/dao/ExecutionCommandDAO.java     |   8 +
 .../server/orm/dao/HostConfigMappingDAO.java    |   5 +
 .../server/orm/dao/HostRoleCommandDAO.java      |   5 +
 .../ambari/server/orm/dao/HostStateDAO.java     |   8 +
 .../ambari/server/orm/dao/RequestDAO.java       |   5 +
 .../orm/dao/RequestScheduleBatchRequestDAO.java |   5 +
 .../apache/ambari/server/orm/dao/RoleDAO.java   |   8 +
 .../server/orm/dao/RoleSuccessCriteriaDAO.java  |   8 +
 .../apache/ambari/server/orm/dao/StageDAO.java  |   5 +
 .../server/upgrade/AbstractUpgradeCatalog.java  |  46 ++-
 .../server/upgrade/SchemaUpgradeHelper.java     |  59 ++--
 .../server/upgrade/UpgradeCatalog150.java       |  48 ++--
 ambari-server/src/main/python/ambari-server.py  |  18 +-
 .../ambari/server/upgrade/UpgradeTest.java      | 208 ++++++++++++++
 .../src/test/python/TestAmbariServer.py         |   4 +-
 .../ddl-scripts/Ambari-DDL-Derby-1.2.3.sql      | 219 +++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.2.4.sql      | 264 +++++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.2.5.sql      | 264 +++++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.4.0.sql      | 280 +++++++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.4.1.sql      | 245 ++++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.4.2.sql      | 246 ++++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.4.3.sql      | 244 ++++++++++++++++
 .../ddl-scripts/Ambari-DDL-Derby-1.4.4.sql      | 217 ++++++++++++++
 30 files changed, 2387 insertions(+), 84 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index c02d633..68d3d88 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -612,7 +612,12 @@ public class Configuration {
 
   public String getDatabaseUrl() {
     if (getPersistenceType() != PersistenceType.IN_MEMORY) {
-      return properties.getProperty(SERVER_JDBC_URL_KEY, getLocalDatabaseUrl());
+      String URI = properties.getProperty(SERVER_JDBC_URL_KEY);
+      if (URI != null) {
+        return URI;
+      } else {
+        return getLocalDatabaseUrl();
+      }
     } else {
       return JDBC_IN_MEMORY_URL;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index f73e171..9f8a359 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -64,6 +64,8 @@ public class DBAccessorImpl implements DBAccessor {
         configuration.getDatabaseUser(),
         configuration.getDatabasePassword());
 
+      connection.setAutoCommit(true); //enable autocommit
+
       //TODO create own mapping and platform classes for supported databases
       String vendorName = connection.getMetaData().getDatabaseProductName() +
         connection.getMetaData().getDatabaseMajorVersion();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java
index b0bc025..f264238 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java
@@ -28,11 +28,14 @@ import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
 import javax.persistence.EntityManager;
 import javax.persistence.NoResultException;
 import javax.persistence.TypedQuery;
+import java.util.List;
 
 @Singleton
 public class ClusterServiceDAO {
   @Inject
   Provider<EntityManager> entityManagerProvider;
+  @Inject
+  DaoUtils daoUtils;
 
   @Transactional
   public ClusterServiceEntity findByPK(ClusterServiceEntityPK clusterServiceEntityPK) {
@@ -54,6 +57,11 @@ public class ClusterServiceDAO {
   }
 
   @Transactional
+  public List<ClusterServiceEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), ClusterServiceEntity.class);
+  }
+
+  @Transactional
   public void refresh(ClusterServiceEntity clusterServiceEntity) {
     entityManagerProvider.get().refresh(clusterServiceEntity);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java
index a2a890c..88e363c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java
@@ -25,11 +25,14 @@ import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.orm.entities.ClusterStateEntity;
 
 import javax.persistence.EntityManager;
+import java.util.List;
 
 @Singleton
 public class ClusterStateDAO {
   @Inject
   Provider<EntityManager> entityManagerProvider;
+  @Inject
+  DaoUtils daoUtils;
 
   @Transactional
   public ClusterStateEntity findByPK(long clusterId) {
@@ -37,6 +40,11 @@ public class ClusterStateDAO {
   }
 
   @Transactional
+  public List<ClusterStateEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), ClusterStateEntity.class);
+  }
+
+  @Transactional
   public void refresh(ClusterStateEntity clusterStateEntity) {
     entityManagerProvider.get().refresh(clusterStateEntity);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupConfigMappingDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupConfigMappingDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupConfigMappingDAO.java
index 15ae9ee..bc781c5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupConfigMappingDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupConfigMappingDAO.java
@@ -56,6 +56,11 @@ public class ConfigGroupConfigMappingDAO {
   }
 
   @Transactional
+  public List<ConfigGroupConfigMappingEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), ConfigGroupConfigMappingEntity.class);
+  }
+
+  @Transactional
   public void create(ConfigGroupConfigMappingEntity
                          configGroupConfigMappingEntity) {
     entityManagerProvider.get().persist(configGroupConfigMappingEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
index 8ff21b5..d3f7515 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
@@ -162,6 +162,11 @@ public class ConfigGroupHostMappingDAO {
   }
 
   @Transactional
+  public List<ConfigGroupHostMappingEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), ConfigGroupHostMappingEntity.class);
+  }
+
+  @Transactional
   public void create(ConfigGroupHostMappingEntity
                          configGroupHostMappingEntity) {
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java
index f35b483..b4e29b9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java
@@ -20,15 +20,32 @@ package org.apache.ambari.server.orm.dao;
 
 import com.google.inject.Singleton;
 
+import javax.persistence.EntityManager;
 import javax.persistence.NoResultException;
 import javax.persistence.Query;
 import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Root;
 import java.util.Collections;
 import java.util.List;
 
 @Singleton
 class DaoUtils {
 
+  public <T> List<T> selectAll(EntityManager entityManager, Class<T> entityClass) {
+    CriteriaBuilder criteriaBuilder = entityManager.getCriteriaBuilder();
+    CriteriaQuery<T> query = criteriaBuilder.createQuery(entityClass);
+    Root<T> root = query.from(entityClass);
+    query.select(root);
+    TypedQuery<T> typedQuery = entityManager.createQuery(query);
+    try {
+      return typedQuery.getResultList();
+    } catch (NoResultException ignored) {
+      return Collections.emptyList();
+    }
+  }
+
   public <T> List<T> selectList(TypedQuery<T> query, Object... parameters) {
     setParameters(query, parameters);
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
index 216acf3..47b16ac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
@@ -25,12 +25,15 @@ import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
 
 import javax.persistence.EntityManager;
+import java.util.List;
 
 @Singleton
 public class ExecutionCommandDAO {
 
   @Inject
   Provider<EntityManager> entityManagerProvider;
+  @Inject
+  DaoUtils daoUtils;
 
   @Transactional
   public ExecutionCommandEntity findByPK(long taskId) {
@@ -38,6 +41,11 @@ public class ExecutionCommandDAO {
   }
 
   @Transactional
+  public List<ExecutionCommandEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), ExecutionCommandEntity.class);
+  }
+
+  @Transactional
   public void create(ExecutionCommandEntity executionCommand) {
     entityManagerProvider.get().persist(executionCommand);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostConfigMappingDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostConfigMappingDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostConfigMappingDAO.java
index fd56d52..a6dcd63 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostConfigMappingDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostConfigMappingDAO.java
@@ -282,6 +282,11 @@ public class HostConfigMappingDAO {
     return mappingsByType;
   }
 
+  @Transactional
+  public List<HostConfigMappingEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), HostConfigMappingEntity.class);
+  }
+
   /**
    * @param clusterId
    * @param hostName

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
index 61e2fc2..6648266 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
@@ -201,6 +201,11 @@ public class HostRoleCommandDAO {
   }
 
   @Transactional
+  public List<HostRoleCommandEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), HostRoleCommandEntity.class);
+  }
+
+  @Transactional
   public void create(HostRoleCommandEntity stageEntity) {
     entityManagerProvider.get().persist(stageEntity);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java
index 383db98..384fbaa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java
@@ -25,11 +25,14 @@ import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.orm.entities.HostStateEntity;
 
 import javax.persistence.EntityManager;
+import java.util.List;
 
 @Singleton
 public class HostStateDAO {
   @Inject
   Provider<EntityManager> entityManagerProvider;
+  @Inject
+  DaoUtils daoUtils;
 
   @Transactional
   public HostStateEntity findByHostName(String hostName) {
@@ -37,6 +40,11 @@ public class HostStateDAO {
   }
 
   @Transactional
+  public List<HostStateEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), HostStateEntity.class);
+  }
+
+  @Transactional
   public void refresh(HostStateEntity hostStateEntity) {
     entityManagerProvider.get().refresh(hostStateEntity);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
index 7a2c836..50cb0fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
@@ -48,6 +48,11 @@ public class RequestDAO {
   }
 
   @Transactional
+  public List<RequestEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), RequestEntity.class);
+  }
+
+  @Transactional
   public boolean isAllTasksCompleted(long requestId) {
     TypedQuery<Long> query = entityManagerProvider.get().createQuery(
         "SELECT task.taskId FROM HostRoleCommandEntity task WHERE task.requestId = ?1 AND " +

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java
index 6a25b65..b073bf0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java
@@ -58,6 +58,11 @@ public class RequestScheduleBatchRequestDAO {
   }
 
   @Transactional
+  public List<RequestScheduleBatchRequestEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), RequestScheduleBatchRequestEntity.class);
+  }
+
+  @Transactional
   public void create(RequestScheduleBatchRequestEntity batchRequestEntity) {
     entityManagerProvider.get().persist(batchRequestEntity);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java
index c66665c..5b37b0e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java
@@ -24,12 +24,15 @@ import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.orm.entities.RoleEntity;
 
 import javax.persistence.EntityManager;
+import java.util.List;
 
 @Singleton
 public class RoleDAO {
 
   @Inject
   Provider<EntityManager> entityManagerProvider;
+  @Inject
+  DaoUtils daoUtils;
 
   @Transactional
   public RoleEntity findByName(String roleName) {
@@ -37,6 +40,11 @@ public class RoleDAO {
   }
 
   @Transactional
+  public List<RoleEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), RoleEntity.class);
+  }
+
+  @Transactional
   public void create(RoleEntity role) {
     role.setRoleName(role.getRoleName().toLowerCase());
     entityManagerProvider.get().persist(role);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
index 2f535f0..6949ee6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
@@ -26,12 +26,15 @@ import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity;
 import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntityPK;
 
 import javax.persistence.EntityManager;
+import java.util.List;
 
 @Singleton
 public class RoleSuccessCriteriaDAO {
 
   @Inject
   Provider<EntityManager> entityManagerProvider;
+  @Inject
+  DaoUtils daoUtils;
 
   @Transactional
   public RoleSuccessCriteriaEntity findByPK(RoleSuccessCriteriaEntityPK roleSuccessCriteriaEntityPK) {
@@ -40,6 +43,11 @@ public class RoleSuccessCriteriaDAO {
   }
 
   @Transactional
+  public List<RoleSuccessCriteriaEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), RoleSuccessCriteriaEntity.class);
+  }
+
+  @Transactional
   public void create(RoleSuccessCriteriaEntity stageEntity) {
     entityManagerProvider.get().persist(stageEntity);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
index 79c001a..c8c2fa8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
@@ -47,6 +47,11 @@ public class StageDAO {
   }
 
   @Transactional
+  public List<StageEntity> findAll() {
+    return daoUtils.selectAll(entityManagerProvider.get(), StageEntity.class);
+  }
+
+  @Transactional
   public long getLastRequestId() {
     TypedQuery<Long> query = entityManagerProvider.get().createQuery("SELECT max(stage.requestId) FROM StageEntity stage", Long.class);
     Long result = daoUtils.selectSingle(query);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 83278b7..9760e80 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -20,9 +20,12 @@ package org.apache.ambari.server.upgrade;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.Provider;
+import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.entities.MetainfoEntity;
 import org.apache.ambari.server.utils.VersionUtils;
 import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
@@ -83,37 +86,26 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
   }
 
   /**
-   * Read server version file
-   * @return
-   */
-  protected String getAmbariServerVersion() {
-    String versionFilePath = configuration.getServerVersionFilePath();
-    try {
-      return FileUtils.readFileToString(new File(versionFilePath));
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    return null;
-  }
-
-  /**
    * Update metainfo to new version.
    */
+  @Transactional
   public int updateMetaInfoVersion(String version) {
-    //TODO verify version/server version usage
-    String ambariServerVersion = getAmbariServerVersion();
     int rows = 0;
-
-    if (ambariServerVersion != null) {
-      try {
-        dbAccessor.executeQuery(String.format("INSERT INTO metainfo (metainfo_key, " +
-          "metainfo_value) VALUES ('version', '%s')", version), true);
-
-        rows = dbAccessor.updateTable("metainfo", "metainfo_value",
-          version, "WHERE metainfo_key = 'version'");
-      } catch (SQLException e) {
-        LOG.error("Failed updating metainfo table.", e);
+    if (version != null) {
+      MetainfoDAO metainfoDAO = injector.getInstance(MetainfoDAO.class);
+
+      MetainfoEntity versionEntity = metainfoDAO.findByKey("version");
+
+      if (versionEntity != null) {
+        versionEntity.setMetainfoValue(version);
+        metainfoDAO.merge(versionEntity);
+      } else {
+        versionEntity = new MetainfoEntity();
+        versionEntity.setMetainfoName("version");
+        versionEntity.setMetainfoValue(version);
+        metainfoDAO.create(versionEntity);
       }
+
     }
 
     return rows;
@@ -129,6 +121,8 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
       dbType = Configuration.ORACLE_DB_NAME;
     } else if (dbUrl.contains(Configuration.MYSQL_DB_NAME)) {
       dbType = Configuration.MYSQL_DB_NAME;
+    } else if (dbUrl.contains(Configuration.DERBY_DB_NAME)) {
+      dbType = Configuration.DERBY_DB_NAME;
     } else {
       throw new RuntimeException("Unable to determine database type.");
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index 38fd54f..b26091a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -29,16 +29,15 @@ import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.IOException;
 import java.sql.ResultSet;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.InputMismatchException;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
 
 public class SchemaUpgradeHelper {
   private static final Logger LOG = LoggerFactory.getLogger
@@ -60,11 +59,11 @@ public class SchemaUpgradeHelper {
     this.configuration = configuration;
   }
 
-  private void startPersistenceService() {
+  public void startPersistenceService() {
     persistService.start();
   }
 
-  private void stopPersistenceService() {
+  public void stopPersistenceService() {
     persistService.stop();
   }
 
@@ -72,11 +71,12 @@ public class SchemaUpgradeHelper {
     return allUpgradeCatalogs;
   }
 
-  private String readSourceVersion() {
+  public String readSourceVersion() {
     String sourceVersion = null;
 
+    ResultSet resultSet = null;
     try {
-      ResultSet resultSet = dbAccessor.executeSelect("SELECT metainfo_value from metainfo WHERE metainfo_key='version'");
+      resultSet = dbAccessor.executeSelect("SELECT \"metainfo_value\" from metainfo WHERE \"metainfo_key\"='version'");
       if (resultSet.next()) {
         return resultSet.getString(1);
       } else {
@@ -86,11 +86,33 @@ public class SchemaUpgradeHelper {
       }
     } catch (SQLException e) {
       throw new RuntimeException("Unable to read database version", e);
+    }finally {
+      if (resultSet != null) {
+        try {
+          resultSet.close();
+        } catch (SQLException e) {
+          throw new RuntimeException("Cannot close result set");
+        }
+      }
     }
 
   }
 
   /**
+   * Read server version file
+   * @return
+   */
+  protected String getAmbariServerVersion() {
+    String versionFilePath = configuration.getServerVersionFilePath();
+    try {
+      return FileUtils.readFileToString(new File(versionFilePath));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    return null;
+  }
+
+  /**
    * Return a set Upgrade catalogs to be applied to upgrade from
    * @sourceVersion to @targetVersion
    *
@@ -128,8 +150,12 @@ public class SchemaUpgradeHelper {
    * Extension of main controller module
    */
   public static class UpgradeHelperModule extends ControllerModule {
+
     public UpgradeHelperModule() throws Exception {
+    }
 
+    public UpgradeHelperModule(Properties properties) throws Exception {
+      super(properties);
     }
 
     @Override
@@ -142,7 +168,7 @@ public class SchemaUpgradeHelper {
     }
   }
 
-  private void executeUpgrade(List<UpgradeCatalog> upgradeCatalogs) throws AmbariException {
+  public void executeUpgrade(List<UpgradeCatalog> upgradeCatalogs) throws AmbariException {
     LOG.info("Executing DDL upgrade...");
 
     if (upgradeCatalogs != null && !upgradeCatalogs.isEmpty()) {
@@ -163,7 +189,7 @@ public class SchemaUpgradeHelper {
     }
   }
 
-  private void executeDMLUpdates(List<UpgradeCatalog> upgradeCatalogs) throws AmbariException {
+  public void executeDMLUpdates(List<UpgradeCatalog> upgradeCatalogs) throws AmbariException {
     LOG.info("Execution DML changes.");
 
     if (upgradeCatalogs != null && !upgradeCatalogs.isEmpty()) {
@@ -190,11 +216,10 @@ public class SchemaUpgradeHelper {
    * @param args args[0] = target version to upgrade to.
    */
   public static void main(String[] args) throws Exception {
-    if (args.length == 0) {
-      throw new InputMismatchException("Need to provide target version.");
-    }
+    Injector injector = Guice.createInjector(new UpgradeHelperModule());
+    SchemaUpgradeHelper schemaUpgradeHelper = injector.getInstance(SchemaUpgradeHelper.class);
 
-    String targetVersion = args[0];
+    String targetVersion = schemaUpgradeHelper.getAmbariServerVersion();
     LOG.info("Upgrading schema to target version = " + targetVersion);
 
     UpgradeCatalog targetUpgradeCatalog = AbstractUpgradeCatalog
@@ -202,10 +227,6 @@ public class SchemaUpgradeHelper {
 
     LOG.debug("Target upgrade catalog. " + targetUpgradeCatalog);
 
-    Injector injector = Guice.createInjector(new UpgradeHelperModule());
-
-    SchemaUpgradeHelper schemaUpgradeHelper = injector.getInstance(SchemaUpgradeHelper.class);
-
     // Read source version from DB
     String sourceVersion = schemaUpgradeHelper.readSourceVersion();
     LOG.info("Upgrading schema from source version = " + sourceVersion);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
index 9571eb4..eabaffc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
@@ -68,9 +68,7 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
     columns.add(new DBColumnInfo("request_context", String.class, 255, null, true));
     columns.add(new DBColumnInfo("request_type", String.class, 255, null, true));
     columns.add(new DBColumnInfo("start_time", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("target_component", String.class, 255, null, true));
-    columns.add(new DBColumnInfo("target_hosts", String.class, null, null, false));
-    columns.add(new DBColumnInfo("target_service", String .class, 255, null, true));
+    columns.add(new DBColumnInfo("status", String.class, 255));
 
     dbAccessor.createTable("request", columns, "request_id");
 
@@ -243,6 +241,28 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
       dbAccessor.executeQuery("DROP DATABASE IF EXISTS ambarirca;");
     }
 
+    // Newly created tables must be populated before the foreign-key constraints are added
+    // Request Entries
+    String tableName = "request";
+    if (!dbAccessor.tableExists(tableName)) {
+      String msg = String.format("Table \"%s\" was not created during schema upgrade", tableName);
+      LOG.error(msg);
+      throw new AmbariException(msg);
+    }else if (!dbAccessor.tableHasData(tableName)) {
+      String query;
+      if (getDbType().equals(Configuration.POSTGRES_DB_NAME)) {
+        query = getPostgresRequestUpgradeQuery();
+      } else if (getDbType().equals(Configuration.ORACLE_DB_NAME)) {
+        query = getOracleRequestUpgradeQuery();
+      } else {
+        query = getMysqlRequestUpgradeQuery();
+      }
+
+      dbAccessor.executeQuery(query);
+    } else {
+      LOG.info("Table {} already filled", tableName);
+    }
+
     // ========================================================================
     // Add constraints
 
@@ -260,9 +280,6 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
     dbAccessor.addFKConstraint("clusterconfigmapping", "FK_clustercfgmap_cluster_id", "cluster_id", "clusters", "cluster_id", true);
     dbAccessor.addFKConstraint("requestresourcefilter", "FK_requestresourcefilter_req_id", "request_id", "request", "request_id", true);
 
-    // ========================================================================
-    // Finally update schema version
-    updateMetaInfoVersion(getTargetVersion());
   }
 
   private void moveRCATableInMySQL(String tableName, String dbName) throws SQLException {
@@ -301,22 +318,6 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
 
 
     // TODO: Convert all possible native queries using Criteria builder
-    // Request Entries
-    tableName = "request";
-    if (dbAccessor.tableExists(tableName) &&
-      !dbAccessor.tableHasData(tableName)) {
-
-      String query;
-      if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
-        query = getPostgresRequestUpgradeQuery();
-      } else if (dbType.equals(Configuration.ORACLE_DB_NAME)) {
-        query = getOracleRequestUpgradeQuery();
-      } else {
-        query = getMysqlRequestUpgradeQuery();
-      }
-
-      dbAccessor.executeQuery(query);
-    }
 
     // Sequences
     if (dbAccessor.tableExists("ambari_sequences")) {
@@ -413,6 +414,9 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
     });
 
 
+    // ========================================================================
+    // Finally update schema version
+    updateMetaInfoVersion(getTargetVersion());
 
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 9cc78c2..7b10043 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -178,7 +178,7 @@ SECURITY_PROVIDER_KEY_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
 SCHEMA_UPGRADE_HELPER_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
                           os.pathsep + "{2} " +\
                           "org.apache.ambari.server.upgrade.SchemaUpgradeHelper" +\
-                          " {3} > " + SERVER_OUT_FILE + " 2>&1"
+                          " > " + SERVER_OUT_FILE + " 2>&1"
 
 STACK_UPGRADE_HELPER_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
                           os.pathsep + "{2} " +\
@@ -2583,14 +2583,14 @@ def compare_versions(version1, version2):
   pass
 
 
-def run_schema_upgrade(version):
+def run_schema_upgrade():
   jdk_path = find_jdk()
   if jdk_path is None:
     print_error_msg("No JDK found, please run the \"setup\" "
                     "command to install a JDK automatically or install any "
                     "JDK manually to " + JDK_INSTALL_DIR)
     return 1
-  command = SCHEMA_UPGRADE_HELPER_CMD.format(jdk_path, get_conf_dir(), get_ambari_classpath(), version)
+  command = SCHEMA_UPGRADE_HELPER_CMD.format(jdk_path, get_conf_dir(), get_ambari_classpath())
   (retcode, stdout, stderr) = run_os_command(command)
   print_info_msg("Return code from schema upgrade command, retcode = " + str(retcode))
   if retcode > 0:
@@ -2674,23 +2674,13 @@ def upgrade(args):
       return -1
 
   parse_properties_file(args)
-  server_version = None
-  if args.server_version_file_path:
-    with open(args.server_version_file_path, 'r') as f:
-      server_version = f.read()
-
-  if not server_version:
-    raise FatalException(10, 'Cannot determine server version from version file '
-                         '%s' % args.server_version_file_path)
-
-  #fix local database objects owner in pre 1.5.0
   #TODO check database version
   if args.persistence_type == 'local':
     retcode, stdout, stderr = change_objects_owner(args)
     if not retcode == 0:
       raise FatalException(20, 'Unable to change owner of database objects')
 
-  retcode = run_schema_upgrade(server_version.strip())
+  retcode = run_schema_upgrade()
   if not retcode == 0:
     raise FatalException(11, 'Schema upgrade failed.')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeTest.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeTest.java
new file mode 100644
index 0000000..e840839
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeTest.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.upgrade;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Key;
+import com.google.inject.TypeLiteral;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.*;
+import org.apache.ambari.server.security.CertificateManager;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.SQLNonTransientConnectionException;
+import java.util.*;
+
+import static org.junit.Assert.assertTrue;
+
+public class UpgradeTest {
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeTest.class);
+
+  private static String DDL_PATTERN = "ddl-scripts/Ambari-DDL-Derby-%s.sql";
+  private static List<String> VERSIONS = Arrays.asList("1.4.4",
+      "1.4.3", "1.4.2", "1.4.1", "1.4.0", "1.2.5", "1.2.4",
+      "1.2.3"); //TODO: add all remaining released versions
+  private static String DROP_DERBY_URL = "jdbc:derby:memory:myDB/ambari;drop=true";
+  private  Properties properties = new Properties();
+
+  private Injector injector;
+
+  public UpgradeTest() {
+    properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "remote");
+    properties.setProperty(Configuration.SERVER_JDBC_URL_KEY, Configuration.JDBC_IN_MEMORY_URL);
+    properties.setProperty(Configuration.SERVER_JDBC_DRIVER_KEY, Configuration.JDBC_IN_MEMROY_DRIVER);
+    properties.setProperty(Configuration.METADETA_DIR_PATH,
+      "src/test/resources/stacks");
+    properties.setProperty(Configuration.SERVER_VERSION_FILE,
+      "target/version");
+    properties.setProperty(Configuration.OS_VERSION_KEY,
+      "centos5");
+  }
+
+  @Test
+  public void testUpgrade() throws Exception {
+    String targetVersion = getLastVersion();
+    List<String> failedVersions = new ArrayList<String>();
+
+    for (String version : VERSIONS) {
+      injector = Guice.createInjector(new ControllerModule(properties));
+
+      try {
+        createSourceDatabase(version);
+
+        performUpgrade(targetVersion);
+
+        testUpgradedSchema();
+      } catch (Exception e) {
+        failedVersions.add(version);
+        e.printStackTrace();
+      }
+
+      dropDatabase();
+
+    }
+
+    assertTrue("Upgrade test failed for version: " + failedVersions, failedVersions.isEmpty());
+
+
+  }
+
+  private void dropDatabase() throws ClassNotFoundException, SQLException {
+    Class.forName(Configuration.JDBC_IN_MEMROY_DRIVER);
+    try {
+      DriverManager.getConnection(DROP_DERBY_URL);
+    } catch (SQLNonTransientConnectionException ignored) {
+      LOG.info("Database dropped ", ignored); //error 08006 expected
+    }
+  }
+
+  private void testUpgradedSchema() throws Exception {
+    injector = Guice.createInjector(new ControllerModule(properties));
+    injector.getInstance(PersistService.class).start();
+
+    //TODO: join() in AmbariServer.run() blocks, preventing a proper server-start test; find a workaround
+
+    //check dao selects
+    //TODO: generify DAOs for the basic methods? Caching config-group DAOs would need special handling in that case
+    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+    clusterDAO.findAll();
+    BlueprintDAO blueprintDAO = injector.getInstance(BlueprintDAO.class);
+    blueprintDAO.findAll();
+    ClusterServiceDAO clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class);
+    clusterServiceDAO.findAll();
+    injector.getInstance(ClusterStateDAO.class).findAll();
+    injector.getInstance(ConfigGroupConfigMappingDAO.class).findAll();
+    injector.getInstance(ConfigGroupDAO.class).findAll();
+    injector.getInstance(ConfigGroupHostMappingDAO.class).findAll();
+    injector.getInstance(ExecutionCommandDAO.class).findAll();
+    injector.getInstance(HostComponentDesiredStateDAO.class).findAll();
+    injector.getInstance(HostComponentStateDAO.class).findAll();
+    injector.getInstance(HostConfigMappingDAO.class).findAll();
+    injector.getInstance(HostDAO.class).findAll();
+    injector.getInstance(HostRoleCommandDAO.class).findAll();
+    injector.getInstance(HostStateDAO.class).findAll();
+    injector.getInstance(KeyValueDAO.class).findAll();
+    injector.getInstance(MetainfoDAO.class).findAll();
+    injector.getInstance(RequestDAO.class).findAll();
+    injector.getInstance(RequestScheduleBatchRequestDAO.class).findAll();
+    injector.getInstance(RequestScheduleDAO.class).findAll();
+    injector.getInstance(RoleDAO.class).findAll();
+    injector.getInstance(RoleSuccessCriteriaDAO.class).findAll();
+    injector.getInstance(ServiceComponentDesiredStateDAO.class).findAll();
+    injector.getInstance(ServiceDesiredStateDAO.class).findAll();
+    injector.getInstance(StageDAO.class).findAll();
+    injector.getInstance(UserDAO.class).findAll();
+
+
+    //TODO extend checks if needed
+    injector.getInstance(PersistService.class).stop();
+
+
+  }
+
+  private void performUpgrade(String targetVersion) throws Exception {
+    Injector injector = Guice.createInjector(new SchemaUpgradeHelper.UpgradeHelperModule(properties));
+    SchemaUpgradeHelper schemaUpgradeHelper = injector.getInstance(SchemaUpgradeHelper.class);
+
+    LOG.info("Upgrading schema to target version = " + targetVersion);
+
+    UpgradeCatalog targetUpgradeCatalog = AbstractUpgradeCatalog
+      .getUpgradeCatalog(targetVersion);
+
+    LOG.debug("Target upgrade catalog. " + targetUpgradeCatalog);
+
+    // Read source version from DB
+    String sourceVersion = schemaUpgradeHelper.readSourceVersion();
+    LOG.info("Upgrading schema from source version = " + sourceVersion);
+
+    List<UpgradeCatalog> upgradeCatalogs =
+      schemaUpgradeHelper.getUpgradePath(sourceVersion, targetVersion);
+
+    schemaUpgradeHelper.executeUpgrade(upgradeCatalogs);
+
+    schemaUpgradeHelper.startPersistenceService();
+
+    schemaUpgradeHelper.executeDMLUpdates(upgradeCatalogs);
+
+    LOG.info("Upgrade successful.");
+
+    schemaUpgradeHelper.stopPersistenceService();
+
+  }
+
+  private String getLastVersion() throws Exception {
+    Injector injector = Guice.createInjector(new SchemaUpgradeHelper.UpgradeHelperModule(properties));
+    Set<UpgradeCatalog> upgradeCatalogs = injector.getInstance(Key.get(new TypeLiteral<Set<UpgradeCatalog>>() {
+    }));
+    String maxVersion = "1.2";
+    for (UpgradeCatalog upgradeCatalog : upgradeCatalogs) {
+      String targetVersion = upgradeCatalog.getTargetVersion();
+      if (VersionUtils.compareVersions(maxVersion, targetVersion) < 0) {
+        maxVersion = targetVersion;
+      }
+    }
+    return maxVersion;
+  }
+
+  private void createSourceDatabase(String version) throws IOException, SQLException {
+
+    //create database
+    String fileName = String.format(DDL_PATTERN, version);
+    fileName = this.getClass().getClassLoader().getResource(fileName).getFile();
+    DBAccessor dbAccessor = injector.getInstance(DBAccessor.class);
+    dbAccessor.executeScript(fileName);
+
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index d8eec44..a7317a9 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -2822,14 +2822,14 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     get_ambari_classpath_mock.return_value = 'test:path12'
     get_conf_dir_mock.return_value = '/etc/conf'
 
-    ambari_server.run_schema_upgrade('1.4.9.40')
+    ambari_server.run_schema_upgrade()
 
     self.assertTrue(jdk_path_mock.called)
     self.assertTrue(get_ambari_classpath_mock.called)
     self.assertTrue(get_conf_dir_mock.called)
     self.assertTrue(run_os_command_mock.called)
     run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp /etc/conf:test:path12 '
-                                           'org.apache.ambari.server.upgrade.SchemaUpgradeHelper 1.4.9.40 '
+                                           'org.apache.ambari.server.upgrade.SchemaUpgradeHelper '
                                            '> /var/log/ambari-server/ambari-server.out 2>&1')
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.3.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.3.sql b/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.3.sql
new file mode 100644
index 0000000..6c2c44d
--- /dev/null
+++ b/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.3.sql
@@ -0,0 +1,219 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+
+CREATE TABLE clusters (cluster_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT NULL, cluster_name VARCHAR(100) NOT NULL UNIQUE, desired_cluster_state VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
+
+CREATE TABLE clusterconfig (version_tag VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data VARCHAR(32000) NOT NULL, create_timestamp BIGINT NOT NULL, PRIMARY KEY (cluster_id, type_name, version_tag));
+
+CREATE TABLE clusterconfigmapping (cluster_id bigint NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, PRIMARY KEY (cluster_id, type_name, create_timestamp));
+
+CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
+
+CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
+
+CREATE TABLE componentconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, service_name, config_type));
+
+CREATE TABLE hostcomponentconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name, config_type));
+
+CREATE TABLE hcdesiredconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name, config_type));
+
+CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+
+CREATE TABLE hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+
+CREATE TABLE hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, ph_cpu_count INTEGER, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, disks_info VARCHAR(10000) NOT NULL, host_attributes VARCHAR(20000) NOT NULL, ipv4 VARCHAR(255), ipv6 VARCHAR(255), public_host_name VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY (host_name));
+
+CREATE TABLE hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL,  PRIMARY KEY (host_name));
+
+CREATE TABLE servicecomponentdesiredstate (component_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (component_name, cluster_id, service_name));
+
+CREATE TABLE serviceconfigmapping (cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name, config_type));
+
+CREATE TABLE servicedesiredstate (cluster_id BIGINT NOT NULL, desired_host_role_mapping INTEGER NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name));
+
+CREATE TABLE roles (role_name VARCHAR(255) NOT NULL, PRIMARY KEY (role_name));
+
+CREATE TABLE users (user_id INTEGER, ldap_user INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL, create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, user_password VARCHAR(255), PRIMARY KEY (user_id), UNIQUE (ldap_user, user_name));
+
+CREATE TABLE execution_command (command BLOB, task_id BIGINT NOT NULL, PRIMARY KEY (task_id));
+
+CREATE TABLE host_role_command (task_id BIGINT NOT NULL, attempt_count SMALLINT NOT NULL, event VARCHAR(32000) NOT NULL, exitcode INTEGER NOT NULL, host_name VARCHAR(255) NOT NULL, last_attempt_time BIGINT NOT NULL, request_id BIGINT NOT NULL, role VARCHAR(255), stage_id BIGINT NOT NULL, start_time BIGINT NOT NULL, status VARCHAR(255), std_error BLOB, std_out BLOB, role_command VARCHAR(255), PRIMARY KEY (task_id));
+
+CREATE TABLE role_success_criteria (role VARCHAR(255) NOT NULL, request_id BIGINT NOT NULL, stage_id BIGINT NOT NULL, success_factor FLOAT NOT NULL, PRIMARY KEY (role, request_id, stage_id));
+
+CREATE TABLE stage (stage_id BIGINT NOT NULL, request_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, log_info VARCHAR(255) NOT NULL, request_context VARCHAR(255), PRIMARY KEY (stage_id, request_id));
+
+CREATE TABLE ClusterHostMapping (cluster_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, host_name));
+
+CREATE TABLE user_roles (role_name VARCHAR(255) NOT NULL, user_id INTEGER NOT NULL, PRIMARY KEY (role_name, user_id));
+
+CREATE TABLE key_value_store ("key" VARCHAR(255), "value" VARCHAR(20000), PRIMARY KEY("key"));
+
+CREATE TABLE hostconfigmapping (cluster_id bigint NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, PRIMARY KEY (cluster_id, host_name, type_name, create_timestamp));
+
+CREATE TABLE metainfo ("metainfo_key" VARCHAR(255), "metainfo_value" VARCHAR(20000), PRIMARY KEY("metainfo_key"));
+
+CREATE TABLE ambari_sequences (sequence_name VARCHAR(255) PRIMARY KEY, "value" BIGINT NOT NULL);
+
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterconfigmapping ADD CONSTRAINT FK_clusterconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT FK_hostcomponentdesiredconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT FK_hostcomponentdesiredconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentdesiredstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hoststate ADD CONSTRAINT FK_hoststate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT FK_servicecomponentdesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT FK_servicedesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE role_success_criteria ADD CONSTRAINT FK_role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE stage ADD CONSTRAINT FK_stage_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_role_name FOREIGN KEY (role_name) REFERENCES roles (role_name);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+
+-- BEGIN;
+
+insert into ambari_sequences(sequence_name, "value")
+select 'cluster_id_seq', 1 FROM SYSIBM.SYSDUMMY1
+union all
+select 'user_id_seq', 2 FROM SYSIBM.SYSDUMMY1
+union all
+select 'host_role_command_id_seq', 1 FROM SYSIBM.SYSDUMMY1;
+
+insert into Roles(role_name)
+select 'admin' FROM SYSIBM.SYSDUMMY1
+union all
+select 'user' FROM SYSIBM.SYSDUMMY1;
+
+insert into Users(user_id, user_name, user_password)
+select 1,'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00' FROM SYSIBM.SYSDUMMY1;
+
+insert into user_roles(role_name, user_id)
+select 'admin',1 FROM SYSIBM.SYSDUMMY1;
+
+insert into metainfo("metainfo_key", "metainfo_value")
+select 'version','1.3.0' FROM SYSIBM.SYSDUMMY1;
+
+-- COMMIT;
+
+-- ambari log4j DDL
+
+
+CREATE TABLE workflow (
+  workflowId VARCHAR(20000), workflowName VARCHAR(20000),
+  parentWorkflowId VARCHAR(20000),  
+  workflowContext VARCHAR(20000), userName VARCHAR(20000),
+  startTime BIGINT, lastUpdateTime BIGINT,
+  numJobsTotal INTEGER, numJobsCompleted INTEGER,
+  inputBytes BIGINT, outputBytes BIGINT,
+  duration BIGINT,
+  PRIMARY KEY (workflowId),
+  FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId)
+);
+
+
+
+CREATE TABLE job (
+  jobId VARCHAR(20000), workflowId VARCHAR(20000), jobName VARCHAR(20000), workflowEntityName VARCHAR(20000),
+  userName VARCHAR(20000), queue VARCHAR(20000), acls VARCHAR(20000), confPath VARCHAR(20000), 
+  submitTime BIGINT, launchTime BIGINT, finishTime BIGINT, 
+  maps INTEGER, reduces INTEGER, status VARCHAR(20000), priority VARCHAR(20000), 
+  finishedMaps INTEGER, finishedReduces INTEGER, 
+  failedMaps INTEGER, failedReduces INTEGER, 
+  mapsRuntime BIGINT, reducesRuntime BIGINT,
+  mapCounters VARCHAR(20000), reduceCounters VARCHAR(20000), jobCounters VARCHAR(20000), 
+  inputBytes BIGINT, outputBytes BIGINT,
+  PRIMARY KEY(jobId),
+  FOREIGN KEY(workflowId) REFERENCES workflow(workflowId)
+);
+
+
+
+CREATE TABLE task (
+  taskId VARCHAR(20000), jobId VARCHAR(20000), taskType VARCHAR(20000), splits VARCHAR(20000), 
+  startTime BIGINT, finishTime BIGINT, status VARCHAR(20000), error VARCHAR(20000), counters VARCHAR(20000), 
+  failedAttempt VARCHAR(20000), 
+  PRIMARY KEY(taskId), 
+  FOREIGN KEY(jobId) REFERENCES job(jobId)
+);
+
+
+
+CREATE TABLE taskAttempt (
+  taskAttemptId VARCHAR(20000), taskId VARCHAR(20000), jobId VARCHAR(20000), taskType VARCHAR(20000), taskTracker VARCHAR(20000), 
+  startTime BIGINT, finishTime BIGINT, 
+  mapFinishTime BIGINT, shuffleFinishTime BIGINT, sortFinishTime BIGINT, 
+  locality VARCHAR(20000), avataar VARCHAR(20000), 
+  status VARCHAR(20000), error VARCHAR(20000), counters VARCHAR(20000), 
+  inputBytes BIGINT, outputBytes BIGINT,
+  PRIMARY KEY(taskAttemptId), 
+  FOREIGN KEY(jobId) REFERENCES job(jobId), 
+  FOREIGN KEY(taskId) REFERENCES task(taskId)
+); 
+
+
+
+CREATE TABLE hdfsEvent (
+  timestamp BIGINT,
+  userName VARCHAR(20000),
+  clientIP VARCHAR(20000),
+  operation VARCHAR(20000),
+  srcPath VARCHAR(20000),
+  dstPath VARCHAR(20000),
+  permissions VARCHAR(20000)
+);
+
+
+
+CREATE TABLE mapreduceEvent (
+  timestamp BIGINT,
+  userName VARCHAR(20000),
+  clientIP VARCHAR(20000),
+  operation VARCHAR(20000),
+  target VARCHAR(20000),
+  result VARCHAR(20000),
+  description VARCHAR(20000),
+  permissions VARCHAR(20000)
+);
+
+
+
+CREATE TABLE clusterEvent (
+  timestamp BIGINT, 
+  service VARCHAR(20000), status VARCHAR(20000), 
+  error VARCHAR(20000), data VARCHAR(20000) , 
+  host VARCHAR(20000), rack VARCHAR(20000)
+);
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/e37c2b57/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.4.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.4.sql b/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.4.sql
new file mode 100644
index 0000000..23001f1
--- /dev/null
+++ b/ambari-server/src/test/resources/ddl-scripts/Ambari-DDL-Derby-1.2.4.sql
@@ -0,0 +1,264 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+
+CREATE TABLE clusters (cluster_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT NULL, cluster_name VARCHAR(100) NOT NULL UNIQUE, desired_cluster_state VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
+
+
+
+CREATE TABLE clusterconfig (version_tag VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data VARCHAR(32000) NOT NULL, create_timestamp BIGINT NOT NULL, PRIMARY KEY (cluster_id, type_name, version_tag));
+
+
+
+CREATE TABLE clusterconfigmapping (cluster_id bigint NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, type_name, create_timestamp));
+
+
+
+CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
+
+
+
+CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
+
+
+
+CREATE TABLE componentconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, service_name, config_type));
+
+
+
+CREATE TABLE hostcomponentconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name, config_type));
+
+
+
+CREATE TABLE hcdesiredconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name, config_type));
+
+
+
+CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+
+
+
+CREATE TABLE hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+
+
+
+CREATE TABLE hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, ph_cpu_count INTEGER, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, disks_info VARCHAR(10000) NOT NULL, host_attributes VARCHAR(20000) NOT NULL, ipv4 VARCHAR(255), ipv6 VARCHAR(255), public_host_name VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY (host_name));
+
+
+
+CREATE TABLE hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL,  PRIMARY KEY (host_name));
+
+
+
+CREATE TABLE servicecomponentdesiredstate (component_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (component_name, cluster_id, service_name));
+
+
+
+CREATE TABLE serviceconfigmapping (cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name, config_type));
+
+
+
+CREATE TABLE servicedesiredstate (cluster_id BIGINT NOT NULL, desired_host_role_mapping INTEGER NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name));
+
+
+
+CREATE TABLE roles (role_name VARCHAR(255) NOT NULL, PRIMARY KEY (role_name));
+
+
+
+CREATE TABLE users (user_id INTEGER, ldap_user INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL, create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, user_password VARCHAR(255), PRIMARY KEY (user_id), UNIQUE (ldap_user, user_name));
+
+
+CREATE TABLE execution_command (command BLOB, task_id BIGINT NOT NULL, PRIMARY KEY (task_id));
+
+
+CREATE TABLE host_role_command (task_id BIGINT NOT NULL, attempt_count SMALLINT NOT NULL, event VARCHAR(32000) NOT NULL, exitcode INTEGER NOT NULL, host_name VARCHAR(255) NOT NULL, last_attempt_time BIGINT NOT NULL, request_id BIGINT NOT NULL, role VARCHAR(255), stage_id BIGINT NOT NULL, start_time BIGINT NOT NULL, status VARCHAR(255), std_error BLOB, std_out BLOB, role_command VARCHAR(255), PRIMARY KEY (task_id));
+
+
+CREATE TABLE role_success_criteria (role VARCHAR(255) NOT NULL, request_id BIGINT NOT NULL, stage_id BIGINT NOT NULL, success_factor FLOAT NOT NULL, PRIMARY KEY (role, request_id, stage_id));
+
+
+CREATE TABLE stage (stage_id BIGINT NOT NULL, request_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, log_info VARCHAR(255) NOT NULL, request_context VARCHAR(255), PRIMARY KEY (stage_id, request_id));
+
+
+CREATE TABLE ClusterHostMapping (cluster_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, host_name));
+
+
+CREATE TABLE user_roles (role_name VARCHAR(255) NOT NULL, user_id INTEGER NOT NULL, PRIMARY KEY (role_name, user_id));
+
+
+CREATE TABLE key_value_store ("key" VARCHAR(255), "value" VARCHAR(20000), PRIMARY KEY("key"));
+
+
+CREATE TABLE hostconfigmapping (cluster_id bigint NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, host_name, type_name, create_timestamp));
+
+
+CREATE TABLE metainfo ("metainfo_key" VARCHAR(255), "metainfo_value" VARCHAR(20000), PRIMARY KEY("metainfo_key"));
+
+
+CREATE TABLE ambari_sequences (sequence_name VARCHAR(255) PRIMARY KEY, "value" BIGINT NOT NULL);
+
+
+
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterconfigmapping ADD CONSTRAINT FK_clusterconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT FK_hostcomponentdesiredconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT FK_hostcomponentdesiredconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentdesiredstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hoststate ADD CONSTRAINT FK_hoststate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT FK_servicecomponentdesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_config_tag FOREIGN KEY (cluster_id, config_type, config_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT FK_servicedesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE role_success_criteria ADD CONSTRAINT FK_role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE stage ADD CONSTRAINT FK_stage_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_role_name FOREIGN KEY (role_name) REFERENCES roles (role_name);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+
+-- BEGIN;
+
+insert into ambari_sequences(sequence_name, "value")
+select 'cluster_id_seq', 1 FROM SYSIBM.SYSDUMMY1
+union all
+select 'user_id_seq', 2 FROM SYSIBM.SYSDUMMY1
+union all
+select 'host_role_command_id_seq', 1 FROM SYSIBM.SYSDUMMY1;
+
+insert into Roles(role_name)
+select 'admin' FROM SYSIBM.SYSDUMMY1
+union all
+select 'user' FROM SYSIBM.SYSDUMMY1;
+
+insert into Users(user_id, user_name, user_password)
+select 1,'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00' FROM SYSIBM.SYSDUMMY1;
+
+insert into user_roles(role_name, user_id)
+select 'admin',1 FROM SYSIBM.SYSDUMMY1;
+
+insert into metainfo("metainfo_key", "metainfo_value")
+select 'version','1.3.0' FROM SYSIBM.SYSDUMMY1;
+
+-- COMMIT;
+
+-- ambari log4j DDL
+
+
+CREATE TABLE workflow (
+  workflowId VARCHAR(20000), workflowName VARCHAR(20000),
+  parentWorkflowId VARCHAR(20000),  
+  workflowContext VARCHAR(20000), userName VARCHAR(20000),
+  startTime BIGINT, lastUpdateTime BIGINT,
+  numJobsTotal INTEGER, numJobsCompleted INTEGER,
+  inputBytes BIGINT, outputBytes BIGINT,
+  duration BIGINT,
+  PRIMARY KEY (workflowId),
+  FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId)
+);
+
+
+
+CREATE TABLE job (
+  jobId VARCHAR(20000), workflowId VARCHAR(20000), jobName VARCHAR(20000), workflowEntityName VARCHAR(20000),
+  userName VARCHAR(20000), queue VARCHAR(20000), acls VARCHAR(20000), confPath VARCHAR(20000), 
+  submitTime BIGINT, launchTime BIGINT, finishTime BIGINT, 
+  maps INTEGER, reduces INTEGER, status VARCHAR(20000), priority VARCHAR(20000), 
+  finishedMaps INTEGER, finishedReduces INTEGER, 
+  failedMaps INTEGER, failedReduces INTEGER, 
+  mapsRuntime BIGINT, reducesRuntime BIGINT,
+  mapCounters VARCHAR(20000), reduceCounters VARCHAR(20000), jobCounters VARCHAR(20000), 
+  inputBytes BIGINT, outputBytes BIGINT,
+  PRIMARY KEY(jobId),
+  FOREIGN KEY(workflowId) REFERENCES workflow(workflowId)
+);
+
+
+
+CREATE TABLE task (
+  taskId VARCHAR(20000), jobId VARCHAR(20000), taskType VARCHAR(20000), splits VARCHAR(20000), 
+  startTime BIGINT, finishTime BIGINT, status VARCHAR(20000), error VARCHAR(20000), counters VARCHAR(20000), 
+  failedAttempt VARCHAR(20000), 
+  PRIMARY KEY(taskId), 
+  FOREIGN KEY(jobId) REFERENCES job(jobId)
+);
+
+
+
+CREATE TABLE taskAttempt (
+  taskAttemptId VARCHAR(20000), taskId VARCHAR(20000), jobId VARCHAR(20000), taskType VARCHAR(20000), taskTracker VARCHAR(20000), 
+  startTime BIGINT, finishTime BIGINT, 
+  mapFinishTime BIGINT, shuffleFinishTime BIGINT, sortFinishTime BIGINT, 
+  locality VARCHAR(20000), avataar VARCHAR(20000), 
+  status VARCHAR(20000), error VARCHAR(20000), counters VARCHAR(20000), 
+  inputBytes BIGINT, outputBytes BIGINT,
+  PRIMARY KEY(taskAttemptId), 
+  FOREIGN KEY(jobId) REFERENCES job(jobId), 
+  FOREIGN KEY(taskId) REFERENCES task(taskId)
+); 
+
+
+
+CREATE TABLE hdfsEvent (
+  timestamp BIGINT,
+  userName VARCHAR(20000),
+  clientIP VARCHAR(20000),
+  operation VARCHAR(20000),
+  srcPath VARCHAR(20000),
+  dstPath VARCHAR(20000),
+  permissions VARCHAR(20000)
+);
+
+
+
+CREATE TABLE mapreduceEvent (
+  timestamp BIGINT,
+  userName VARCHAR(20000),
+  clientIP VARCHAR(20000),
+  operation VARCHAR(20000),
+  target VARCHAR(20000),
+  result VARCHAR(20000),
+  description VARCHAR(20000),
+  permissions VARCHAR(20000)
+);
+
+
+
+CREATE TABLE clusterEvent (
+  timestamp BIGINT, 
+  service VARCHAR(20000), status VARCHAR(20000), 
+  error VARCHAR(20000), data VARCHAR(20000) , 
+  host VARCHAR(20000), rack VARCHAR(20000)
+);
+
+
+


Mime
View raw message