cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ni...@apache.org
Subject [19/57] [abbrv] [partial] merge master
Date Mon, 13 May 2013 12:25:49 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
new file mode 100755
index 0000000..0b9ff1a
--- /dev/null
+++ b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
@@ -0,0 +1,816 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.capacity.dao;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.ejb.Local;
+import javax.inject.Inject;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.capacity.Capacity;
+import com.cloud.capacity.CapacityVO;
+import com.cloud.storage.Storage;
+import com.cloud.utils.Pair;
+import com.cloud.utils.db.Filter;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.GenericSearchBuilder;
+import com.cloud.utils.db.JoinBuilder.JoinType;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.SearchCriteria.Func;
+import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@Component
+@Local(value = { CapacityDao.class })
+public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements CapacityDao {
+    private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class);
+
    // Atomic increment/decrement of the allocated (used) capacity counter for one
    // host row of a given capacity type. Parameters: amount, host_id, capacity_type.
    private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
    private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?";

    // Clusters (within a zone or pod) that have enough free capacity of one type
    // in the outer query AND of a second type in the nested IN sub-query.
    // The caller appends the scope predicate ("capacity.data_center_id = ?" or
    // "capacity.pod_id = ?") after PART1 and again after PART2; free capacity is
    // scaled by the per-cluster overcommit ratio held in cluster_details.
    private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id  FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL)   INNER JOIN `cloud`.`cluster_details` cluster_details ON (cluster.id = cluster_details.cluster_id ) WHERE ";
    private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value ) - used_capacity + reserved_capacity) >= ? AND capacity.cluster_id IN (SELECT distinct capacity.cluster_id  FROM `cloud`.`op_host_capacity` capacity INNER JOIN  `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id ) WHERE ";
    private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value) - used_capacity + reserved_capacity) >= ?) ";

    private final SearchBuilder<CapacityVO> _hostIdTypeSearch;   // host/pool id + capacity type
    private final SearchBuilder<CapacityVO> _hostOrPoolIdSearch; // host/pool id only
    private final SearchBuilder<CapacityVO> _allFieldsSearch;    // id/host/zone/pod/cluster/type/state
    @Inject protected PrimaryDataStoreDao _storagePoolDao;


    // Hosts of a given type in a cluster with enough overcommit-adjusted CPU
    // (outer query, capacity_type '1') AND memory (nested sub-query, capacity_type '0').
    private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = " SELECT  host_capacity.host_id FROM (`cloud`.`host` JOIN `cloud`.`op_host_capacity` host_capacity ON (host.id = host_capacity.host_id AND host.cluster_id = ?) JOIN `cloud`.`cluster_details` cluster_details ON (host_capacity.cluster_id = cluster_details.cluster_id) AND  host.type = ? AND cluster_details.name='cpuOvercommitRatio' AND ((host_capacity.total_capacity *cluster_details.value ) - host_capacity.used_capacity) >= ? and host_capacity.capacity_type = '1' " +
               " AND  host_capacity.host_id IN (SELECT capacity.host_id FROM `cloud`.`op_host_capacity` capacity JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id= cluster_details.cluster_id) where capacity_type='0' AND cluster_details.name='memoryOvercommitRatio' AND ((total_capacity* cluster_details.value) - used_capacity ) >= ?)) ";

    // Clusters ordered by aggregate usage ratio, ascending (least used first).
    // NOTE(review): PART2 references cluster_details.name/value but PART1 performs
    // no join on cluster_details — this statement looks unusable as written; the
    // overcommit variant below carries the required join. Verify against callers.
    private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity ) FROM `cloud`.`op_host_capacity` capacity WHERE ";

    private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2= " AND capacity_type = ?  AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";

    // Same ordering, but with total capacity scaled by the cluster overcommit ratio.
    private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE ";

    private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2= " AND capacity_type = ?  AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";

+    private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id  FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
+                                                          " ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster ON (capacity.cluster_id = cluster.cluster_id ) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name= ? ((total_capacity * cluster.value ) - used_capacity + reserved_capacity) >= ? ";
+
+    private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = " SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity) FROM `cloud`.`op_host_capacity` capacity WHERE data_center_id= ? AND capacity_type = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC ";
+
+    private static final String ORDER_PODS_BY_AGGREGATE_OVERCOMMIT_CAPACITY ="SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE data_center_id=? AND capacity_type = ?  AND cluster_details.name = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";
+
+    private static final String LIST_CAPACITY_BY_RESOURCE_STATE = "SELECT capacity.data_center_id, sum(capacity.used_capacity), sum(capacity.reserved_quantity), sum(capacity.total_capacity), capacity_capacity_type "+
+                                                                  "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+
+                                                                  "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+
+                                                                  "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+
+                                                                  "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+
+                                                                  "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) ";
+    
    // Capacity aggregated per zone and capacity type; CPU totals (type 1) are scaled
    // by the global cpu.overprovisioning.factor. The caller appends a "?" after
    // PART2 for the LIMIT value.
    private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
                                                                         "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
                                                                         " capacity.capacity_type, capacity.data_center_id "+
                                                                         "FROM `cloud`.`op_host_capacity` capacity "+
                                                                         "WHERE  total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
    private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2 = " GROUP BY data_center_id, capacity_type order by percent desc limit ";
    // Same aggregation per pod; CPU (case 1) and memory (case '0') totals are scaled
    // by the per-cluster overcommit ratios from cluster_details.
    // NOTE(review): the case labels mix an int (1) and a string ('0') — MySQL
    // coerces, but this looks accidental; verify the intended types.
    private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 =  "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," +
            " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
            "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," +
            "capacity.capacity_type, capacity.data_center_id, pod_id FROM `cloud`.`op_host_capacity` capacity WHERE  total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' ";

    private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2 = " GROUP BY pod_id, capacity_type order by percent desc limit ";

    // Same aggregation per cluster (adds cluster_id to the select list).
    private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," +
            " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
            "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," +
            "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id FROM `cloud`.`op_host_capacity` capacity WHERE  total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' ";


    private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " GROUP BY cluster_id, capacity_type order by percent desc limit ";
    // Caller appends the scope predicate after WHERE (e.g. "host_id = ?").
    private static final String UPDATE_CAPACITY_STATE = "UPDATE `cloud`.`op_host_capacity` SET capacity_state = ? WHERE ";

    // Clusters whose projected usage ratio (current + requested) exceeds their
    // disable threshold; the per-cluster cluster_details override is used when
    // present, otherwise the global configuration value of the same name.
    // Parameters: requested amount, detail name (x3), zone id, capacity type.
    private static final String LIST_CLUSTERS_CROSSING_THRESHOLD = "SELECT clusterList.cluster_id " +
                       "FROM (	SELECT cluster.cluster_id cluster_id, ( (sum(cluster.used) + sum(cluster.reserved) + ?)/sum(cluster.total) ) ratio, cluster.configValue value " +
                                "FROM (	SELECT capacity.cluster_id cluster_id, capacity.used_capacity used, capacity.reserved_capacity reserved, capacity.total_capacity total, " +
                                            "CASE (SELECT count(*) FROM `cloud`.`cluster_details` details WHERE details.cluster_id = capacity.cluster_id AND details.name = ? ) " +
                                                "WHEN 1 THEN (	SELECT details.value FROM `cloud`.`cluster_details` details WHERE details.cluster_id = capacity.cluster_id AND details.name = ? ) " +
                                                "ELSE (	SELECT config.value FROM `cloud`.`configuration` config WHERE config.name = ?) " +
                                            "END configValue " +
                                        "FROM `cloud`.`op_host_capacity` capacity " +
                                        "WHERE capacity.data_center_id = ? AND capacity.capacity_type = ? AND capacity.total_capacity > 0) cluster " +

                                "GROUP BY cluster.cluster_id)  clusterList " +
                        "WHERE clusterList.ratio > clusterList.value; ";
+
    // Builds the reusable SearchBuilder instances once; they are turned into
    // SearchCriteria per query by the finder methods below.
    public CapacityDaoImpl() {
        // Lookup by host/pool id + capacity type (see findByHostIdType).
        _hostIdTypeSearch = createSearchBuilder();
        _hostIdTypeSearch.and("hostId", _hostIdTypeSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
        _hostIdTypeSearch.and("type", _hostIdTypeSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
        _hostIdTypeSearch.done();

        // Lookup by host/pool id alone.
        _hostOrPoolIdSearch = createSearchBuilder();
        _hostOrPoolIdSearch.and("hostId", _hostOrPoolIdSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
        _hostOrPoolIdSearch.done();

        // General-purpose search over every indexed column; callers set only the
        // parameters they need.
        _allFieldsSearch = createSearchBuilder();
        _allFieldsSearch.and("id", _allFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
        _allFieldsSearch.and("hostId", _allFieldsSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
        _allFieldsSearch.and("zoneId", _allFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        _allFieldsSearch.and("podId", _allFieldsSearch.entity().getPodId(), SearchCriteria.Op.EQ);
        _allFieldsSearch.and("clusterId", _allFieldsSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
        _allFieldsSearch.and("capacityType", _allFieldsSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
        _allFieldsSearch.and("capacityState", _allFieldsSearch.entity().getCapacityState(), SearchCriteria.Op.EQ);

        _allFieldsSearch.done();
    }
+          
+    @Override
+    public  List<Long> listClustersCrossingThreshold(short capacityType, Long zoneId, String configName, long compute_requested){
+
+         Transaction txn = Transaction.currentTxn();
+         PreparedStatement pstmt = null;
+         List<Long> result = new ArrayList<Long>();         
+         StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD);
+         // during listing the clusters that cross the threshold
+         // we need to check with disabled thresholds of each cluster if not defined at cluster consider the global value
+         try {
+             pstmt = txn.prepareAutoCloseStatement(sql.toString());
+             pstmt.setLong(1,compute_requested);
+             pstmt.setString(2, configName);
+             pstmt.setString(3, configName);
+             pstmt.setString(4, configName);
+             pstmt.setLong(5,zoneId);
+             pstmt.setShort(6,capacityType);
+
+             ResultSet rs = pstmt.executeQuery();
+             while (rs.next()) {
+                 result.add(rs.getLong(1));
+             }
+             return result;
+         } catch (SQLException e) {
+             throw new CloudRuntimeException("DB Exception on: " + sql, e);
+         } catch (Throwable e) {
+        	 throw new CloudRuntimeException("Caught: " + sql, e);
+         } 
+     }
+
+    /*public static String preparePlaceHolders(int length) {
+        StringBuilder builder = new StringBuilder();
+        for (int i = 0; i < length;) {
+            builder.append("?");
+            if (++i < length) {
+                builder.append(",");
+            }
+        }
+        return builder.toString();
+    }
+
+    public static void setValues(PreparedStatement preparedStatement, Object... values) throws SQLException {
+        for (int i = 0; i < values.length; i++) {
+            preparedStatement.setObject(i + 1, values[i]);
+        }
+    }*/
+
+
+    @Override
+    public  List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId, String resource_state){
+
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<SummedCapacity> result = new ArrayList<SummedCapacity>();
+
+        StringBuilder sql = new StringBuilder(LIST_CAPACITY_BY_RESOURCE_STATE);           
+        List<Long> resourceIdList = new ArrayList<Long>();
+
+        if (zoneId != null){
+            sql.append(" AND capacity.data_center_id = ?");
+            resourceIdList.add(zoneId);
+        }
+        if (podId != null){
+            sql.append(" AND capacity.pod_id = ?");
+            resourceIdList.add(podId);
+        }
+        if (clusterId != null){
+            sql.append(" AND capacity.cluster_id = ?");
+            resourceIdList.add(clusterId);
+        }
+        if (capacityType != null){
+            sql.append(" AND capacity.capacity_type = ?");
+            resourceIdList.add(capacityType.longValue());
+        }   
+
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setString(1, resource_state);
+            pstmt.setString(2, resource_state);
+            pstmt.setString(3, resource_state);
+            pstmt.setString(4, resource_state);
+            for (int i = 0; i < resourceIdList.size(); i++){                
+                pstmt.setLong( 5+i, resourceIdList.get(i));
+            }            
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                SummedCapacity summedCapacity = new SummedCapacity(rs.getLong(2), rs.getLong(3), rs.getLong(4), (short)rs.getLong(5), null, null, rs.getLong(1));
+                result.add(summedCapacity);
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }        
+    }
+
+    @Override
+    public  List<SummedCapacity> listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit){
+
+        StringBuilder finalQuery = new StringBuilder(); 
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<SummedCapacity> result = new ArrayList<SummedCapacity>();
+
+        List<Long> resourceIdList = new ArrayList<Long>();
+
+        switch(level){
+        case 1: // List all the capacities grouped by zone, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1);
+            break;
+
+        case 2: // List all the capacities grouped by pod, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1);
+            break;
+
+        case 3: // List all the capacities grouped by cluster, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1);
+            break;
+        }
+
+        if (zoneId != null){
+            finalQuery.append(" AND data_center_id = ?" );
+            resourceIdList.add(zoneId);
+        }
+        if (podId != null){
+            finalQuery.append(" AND pod_id = ?" );
+            resourceIdList.add(podId);
+        }
+        if (clusterId != null){
+            finalQuery.append(" AND cluster_id = ?" );
+            resourceIdList.add(clusterId );
+        }
+        if (capacityType != null){
+            finalQuery.append(" AND capacity_type = ?");
+            resourceIdList.add(capacityType.longValue() );
+        }
+
+        switch(level){
+        case 1: // List all the capacities grouped by zone, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2);
+            break;
+
+        case 2: // List all the capacities grouped by pod, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2);
+            break;
+
+        case 3: // List all the capacities grouped by cluster, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2);
+            break;
+        }
+
+        finalQuery.append("?");
+        resourceIdList.add((long) limit);
+        
+        try {
+            pstmt = txn.prepareAutoCloseStatement(finalQuery.toString());
+            for (int i = 0; i < resourceIdList.size(); i++){
+                pstmt.setLong(1+i, resourceIdList.get(i));
+            }
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                Long capacityPodId = null;
+                Long capacityClusterId = null;
+
+                if(level != 1 && rs.getLong(6) != 0)
+                    capacityPodId = rs.getLong(6);
+                if(level == 3 && rs.getLong(7) != 0)
+                    capacityClusterId = rs.getLong(7);                   
+
+                SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getFloat(3),
+                        (short)rs.getLong(4), rs.getLong(5),
+                        capacityPodId, capacityClusterId);
+
+                result.add(summedCapacity);
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + finalQuery, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + finalQuery, e);
+        }                     
+
+    }
+
+    @Override
+    public  List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId){
+
+        GenericSearchBuilder<CapacityVO, SummedCapacity> SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
+        SummedCapacitySearch.select("dcId", Func.NATIVE, SummedCapacitySearch.entity().getDataCenterId());
+        SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
+        SummedCapacitySearch.select("sumReserved", Func.SUM, SummedCapacitySearch.entity().getReservedCapacity());
+        SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());
+        SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());        
+
+        if (zoneId==null && podId==null && clusterId==null){ // List all the capacities grouped by zone, capacity Type
+            SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getDataCenterId(), SummedCapacitySearch.entity().getCapacityType());            
+        }else {
+            SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getCapacityType());
+        }
+
+        if (zoneId != null){
+            SummedCapacitySearch.and("dcId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+        }
+        if (podId != null){
+            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+        }
+        if (clusterId != null){
+            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+        }
+        if (capacityType != null){
+            SummedCapacitySearch.and("capacityType", SummedCapacitySearch.entity().getCapacityType(), Op.EQ);	
+        }        
+
+        SummedCapacitySearch.done();
+
+
+        SearchCriteria<SummedCapacity> sc = SummedCapacitySearch.create();
+        if (zoneId != null){
+            sc.setParameters("dcId", zoneId);
+        }
+        if (podId != null){
+            sc.setParameters("podId", podId);
+        }
+        if (clusterId != null){
+            sc.setParameters("clusterId", clusterId);
+        }
+        if (capacityType != null){
+            sc.setParameters("capacityType", capacityType);
+        }
+
+        Filter filter = new Filter(CapacityVO.class, null, true, null, null);
+        List<SummedCapacity> results = customSearchIncludingRemoved(sc, filter);
+        return results;        
+
+    }
+
+    public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        try {
+            txn.start();
+            String sql = null;
+            if (add) {
+                sql = ADD_ALLOCATED_SQL;
+            } else {
+                sql = SUBTRACT_ALLOCATED_SQL;
+            }
+            pstmt = txn.prepareAutoCloseStatement(sql);
+            pstmt.setLong(1, allocatedAmount);
+            pstmt.setLong(2, hostId);
+            pstmt.setShort(3, capacityType);
+            pstmt.executeUpdate(); // TODO:  Make sure exactly 1 row was updated?
+            txn.commit();
+        } catch (Exception e) {
+            txn.rollback();
+            s_logger.warn("Exception updating capacity for host: " + hostId, e);
+        }
+    }
+
+
+    @Override
+    public CapacityVO findByHostIdType(Long hostId, short capacityType) {
+        SearchCriteria<CapacityVO> sc = _hostIdTypeSearch.create();
+        sc.setParameters("hostId", hostId);
+        sc.setParameters("type", capacityType);
+        return findOneBy(sc);
+    }  
+
+    @Override
+    public List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone){
+    Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<Long> result = new ArrayList<Long>();
+
+        StringBuilder sql = new StringBuilder(LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1);
+
+        if(isZone){
+            sql.append("capacity.data_center_id = ?");
+        }else{
+            sql.append("capacity.pod_id = ?");
+        }
+        sql.append(LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2);
+        if(isZone){
+            sql.append("capacity.data_center_id = ?");
+        }else{
+            sql.append("capacity.pod_id = ?");
+        }
+        sql.append(LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3);
+
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setLong(1, id);
+            pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
+            pstmt.setString(3,"cpuOvercommitRatio");
+            pstmt.setLong(4, requiredCpu);
+            pstmt.setLong(5, id);
+            pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
+            pstmt.setString(7,"memoryOvercommitRatio");
+            pstmt.setLong(8, requiredRam);
+
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                result.add(rs.getLong(1));
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
+    }
+
+
+    @Override
+    public List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType){
+    Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<Long> result = new ArrayList<Long>();
+
+        StringBuilder sql = new StringBuilder(LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY);
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setLong(1, clusterId);
+            pstmt.setString(2, hostType);
+            pstmt.setLong(3, requiredCpu);
+            pstmt.setLong(4, requiredRam);
+
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                result.add(rs.getLong(1));
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
+    }
+
+    public static class SummedCapacity {
+        public long sumUsed;
+        public long sumReserved;
+        public long sumTotal;
+        public Float percentUsed;
+        public short capacityType;
+        public Long clusterId;
+        public Long podId;
+        public Long dcId;
+        public SummedCapacity() {
+        }
+        public SummedCapacity(long sumUsed, long sumReserved, long sumTotal,
+                short capacityType, Long clusterId, Long podId) {
+            super();
+            this.sumUsed = sumUsed;
+            this.sumReserved = sumReserved;
+            this.sumTotal = sumTotal;
+            this.capacityType = capacityType;
+            this.clusterId = clusterId;
+            this.podId = podId;
+        }
+        public SummedCapacity(long sumUsed, long sumReserved, long sumTotal,
+                short capacityType, Long clusterId, Long podId, Long zoneId) {
+            this(sumUsed, sumReserved, sumTotal, capacityType, clusterId, podId);
+            this.dcId = zoneId;
+        }
+
+        public SummedCapacity(long sumUsed, long sumTotal, float percentUsed, short capacityType, Long zoneId, Long podId, Long clusterId) {
+            super();
+            this.sumUsed = sumUsed;
+            this.sumTotal = sumTotal;
+            this.percentUsed = percentUsed;
+            this.capacityType = capacityType;
+            this.clusterId = clusterId;
+            this.podId = podId;
+            this.dcId = zoneId;
+        }
+
+        public Short getCapacityType() {				
+            return capacityType;
+        }
+        public Long getUsedCapacity() {
+            return sumUsed;
+        }
+        public long getReservedCapacity() {
+            return sumReserved;
+        }
+        public Long getTotalCapacity() {
+            return sumTotal;
+        }
+        public Long getDataCenterId() {
+            return dcId;
+        }
+        public Long getClusterId() {
+            return clusterId;
+        }
+        public Long getPodId() {
+            return podId;
+        }
+        public Float getPercentUsed() {
+            return percentUsed;
+        }
+    }
+    @Override
+    public List<SummedCapacity> findByClusterPodZone(Long zoneId, Long podId, Long clusterId){
+
+        GenericSearchBuilder<CapacityVO, SummedCapacity> SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
+        SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
+        SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());   
+        SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());                                
+        SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getCapacityType());
+
+        if(zoneId != null){
+            SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+        }
+        if (podId != null){
+            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+        }
+        if (clusterId != null){
+            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+        }
+        SummedCapacitySearch.done();
+
+
+        SearchCriteria<SummedCapacity> sc = SummedCapacitySearch.create();
+        if (zoneId != null){
+            sc.setParameters("zoneId", zoneId);
+        }
+        if (podId != null){
+            sc.setParameters("podId", podId);
+        }
+        if (clusterId != null){
+            sc.setParameters("clusterId", clusterId);
+        }
+
+        return customSearchIncludingRemoved(sc, null);         
+    }
+
+    @Override
+    public List<SummedCapacity> findNonSharedStorageForClusterPodZone(Long zoneId, Long podId, Long clusterId){
+
+        GenericSearchBuilder<CapacityVO, SummedCapacity> SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
+        SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
+        SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());   
+        SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());
+        SummedCapacitySearch.and("capacityType", SummedCapacitySearch.entity().getCapacityType(), Op.EQ);
+
+        SearchBuilder<StoragePoolVO>  nonSharedStorage = _storagePoolDao.createSearchBuilder();
+        nonSharedStorage.and("poolTypes", nonSharedStorage.entity().getPoolType(), SearchCriteria.Op.IN);
+        SummedCapacitySearch.join("nonSharedStorage", nonSharedStorage, nonSharedStorage.entity().getId(), SummedCapacitySearch.entity().getHostOrPoolId(), JoinType.INNER);
+        nonSharedStorage.done();        
+
+        if(zoneId != null){
+            SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+        }
+        if (podId != null){
+            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+        }
+        if (clusterId != null){
+            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+        }
+        SummedCapacitySearch.done();
+
+
+        SearchCriteria<SummedCapacity> sc = SummedCapacitySearch.create();
+        sc.setJoinParameters("nonSharedStorage", "poolTypes", Storage.getNonSharedStoragePoolTypes().toArray());
+        sc.setParameters("capacityType", Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED);
+        if (zoneId != null){
+            sc.setParameters("zoneId", zoneId);
+        }
+        if (podId != null){
+            sc.setParameters("podId", podId);
+        }
+        if (clusterId != null){
+            sc.setParameters("clusterId", clusterId);
+        }
+
+        return customSearchIncludingRemoved(sc, null);         
+    }
+
+    @Override
+    public boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId, Long hostId) {
+        SearchCriteria<CapacityVO> sc = _allFieldsSearch.create();
+
+        if (capacityType != null) {
+            sc.setParameters("capacityType", capacityType);
+        }
+
+        if (zoneId != null) {
+            sc.setParameters("zoneId", zoneId);
+        }
+
+        if (podId != null) {
+            sc.setParameters("podId", podId);
+        }
+
+        if (clusterId != null) {
+            sc.setParameters("clusterId", clusterId);
+        }
+
+        if (hostId != null) {
+            sc.setParameters("hostId", hostId);
+        }
+
+        return remove(sc) > 0;
+    }
+
+    @Override
+    public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone){
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<Long> result = new ArrayList<Long>();
+        Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
+        StringBuilder sql = new StringBuilder();
+        if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY)  {
+             sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1);
+        }
+        else {
+             sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1);
+        }
+
+
+        if(isZone){
+            sql.append(" data_center_id = ?");
+        }else{
+            sql.append(" pod_id = ?");
+        }
+        if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY){
+           sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2);
+        }
+        else {
+           sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2);
+        }
+
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setLong(1, id);
+            pstmt.setShort(2,capacityTypeForOrdering);
+
+            if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_CPU){
+                pstmt.setString(3,"cpuOvercommitRatio");
+            }
+            else if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_MEMORY){
+                pstmt.setString(3,"memoryOvercommitRatio");
+            }
+
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                Long clusterId = rs.getLong(1);
+                result.add(clusterId);
+                clusterCapacityMap.put(clusterId, rs.getDouble(2));
+            }
+            return new Pair<List<Long>, Map<Long, Double>>(result, clusterCapacityMap);
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
+    }
+
+    @Override
+    public List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType) {
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<Long> result = new ArrayList<Long>();
+
+        StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE);
+                      sql.append("AND capacity.pod_id IN (");
+                      sql.append(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE);
+                      sql.append(")");
+
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setLong(1, zoneId);
+            pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
+            pstmt.setString(3, "cpuOvercommitRatio");
+            pstmt.setLong(4, requiredCpu);
+            pstmt.setLong(5, zoneId);
+            pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
+            pstmt.setString(7,"memoryOvercommitRatio" );
+            pstmt.setLong(8, requiredRam);
+
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                result.add(rs.getLong(1));
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
+    }
+
+    @Override
+    public Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering) {
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<Long> result = new ArrayList<Long>();
+        Map<Long, Double> podCapacityMap = new HashMap<Long, Double>();
+        
+        StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY);
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setLong(2, zoneId);
+            pstmt.setShort(3, capacityTypeForOrdering);
+            
+            if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
+                pstmt.setString(3, "cpuOvercommitRatio");
+            }
+            
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                Long podId = rs.getLong(1);
+                result.add(podId);
+                podCapacityMap.put(podId, rs.getDouble(2));
+            }
+            return new Pair<List<Long>, Map<Long, Double>>(result, podCapacityMap);
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
+    }
+
+    @Override
+    public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState) {
+        Transaction txn = Transaction.currentTxn();
+        StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE);
+        List<Long> resourceIdList = new ArrayList<Long>();
+
+        if (dcId != null){
+            sql.append(" data_center_id = ?");
+            resourceIdList.add(dcId);
+        }
+        if (podId != null){
+            sql.append(" pod_id = ?");
+            resourceIdList.add(podId);
+        }
+        if (clusterId != null){
+            sql.append(" cluster_id = ?");
+            resourceIdList.add(clusterId);
+        }
+        if (hostId != null){
+            sql.append(" host_id = ?");
+            resourceIdList.add(hostId);
+        }
+
+        PreparedStatement pstmt = null;
+        try {       
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setString(1, capacityState);
+            for (int i = 0; i < resourceIdList.size(); i++){                
+                pstmt.setLong( 2+i, resourceIdList.get(i));
+            }            
+            pstmt.executeUpdate();
+        } catch (Exception e) {
+            s_logger.warn("Error updating CapacityVO", e);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/certificate/CertificateVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/certificate/CertificateVO.java b/engine/schema/src/com/cloud/certificate/CertificateVO.java
new file mode 100644
index 0000000..4f04760
--- /dev/null
+++ b/engine/schema/src/com/cloud/certificate/CertificateVO.java
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.certificate;
+
+import org.apache.cloudstack.api.InternalIdentity;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.Table;
+
+@Entity
+@Table(name="certificate")
+public class CertificateVO implements InternalIdentity {
+
+    @Id
+    @GeneratedValue(strategy=GenerationType.IDENTITY)
+    @Column(name="id")
+    private Long id = null;
+
+    @Column(name="certificate",length=65535)
+    private String certificate;
+
+    @Column(name="updated")
+    private String updated;
+
+    public CertificateVO() {}
+
+    public long getId() {
+        return id;
+    }
+    
+    public String getCertificate() {
+        return certificate;
+    }
+    public void setCertificate(String certificate) {
+        this.certificate = certificate;
+    }
+    
+    public String getUpdated(){
+    	return this.updated;
+    }
+    
+    public void setUpdated(String updated){
+    	this.updated = updated;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/certificate/dao/CertificateDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/certificate/dao/CertificateDao.java b/engine/schema/src/com/cloud/certificate/dao/CertificateDao.java
new file mode 100644
index 0000000..9aedf9f
--- /dev/null
+++ b/engine/schema/src/com/cloud/certificate/dao/CertificateDao.java
@@ -0,0 +1,24 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.certificate.dao;
+
+import com.cloud.certificate.CertificateVO;
+import com.cloud.utils.db.GenericDao;
+
/**
 * DAO for the "certificate" table (see {@link com.cloud.certificate.CertificateVO}).
 */
public interface CertificateDao extends GenericDao<CertificateVO, Long> {
	/**
	 * Stores the given certificate string on the supplied row and marks it
	 * updated. Returns the row id, or 0 on failure (the current implementation
	 * never throws). NOTE(review): managementServerId is ignored by the
	 * current implementation -- confirm whether it is still needed.
	 */
	public Long persistCustomCertToDb(String certStr, CertificateVO cert, Long managementServerId);
}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
new file mode 100644
index 0000000..f071cea
--- /dev/null
+++ b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.certificate.dao;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import javax.ejb.Local;
+
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.certificate.CertificateVO;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.GenericDaoBase;
+
+@Component
+@Local(value={CertificateDao.class}) @DB(txn=false)
+public class CertificateDaoImpl extends GenericDaoBase<CertificateVO, Long>  implements CertificateDao {
+	
+    private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class);
+    
+    public CertificateDaoImpl(){
+    	
+    }
+    
+	@Override
+	public Long persistCustomCertToDb(String certStr, CertificateVO cert, Long managementServerId){		
+	    BufferedInputStream f = null;
+		try 
+	    {
+        	cert.setCertificate(certStr);
+        	cert.setUpdated("Y");
+        	update(cert.getId(),cert);
+        	return cert.getId();
+	    }  catch (Exception e){
+	    	s_logger.warn("Unable to read the certificate: "+e);
+	    	return new Long(0);
+	    }
+	    finally 
+	    {
+	        if (f != null) 
+	        	try { f.close(); } catch (IOException ignored) { }
+	    }
+	}
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/ClusterInvalidSessionException.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/ClusterInvalidSessionException.java b/engine/schema/src/com/cloud/cluster/ClusterInvalidSessionException.java
new file mode 100644
index 0000000..8ac94f2
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/ClusterInvalidSessionException.java
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster;
+
+import com.cloud.exception.CloudException;
+
+public class ClusterInvalidSessionException extends CloudException {
+
+	private static final long serialVersionUID = -6636524194520997512L;
+
+    public ClusterInvalidSessionException(String message) {
+        super(message);
+    }
+
+    public ClusterInvalidSessionException(String message, Throwable th) {
+        super(message, th);
+    }
+}
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/ManagementServerHostPeerVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/ManagementServerHostPeerVO.java b/engine/schema/src/com/cloud/cluster/ManagementServerHostPeerVO.java
new file mode 100644
index 0000000..e5e12ec
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/ManagementServerHostPeerVO.java
@@ -0,0 +1,120 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster;
+
+import java.util.Date;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.EnumType;
+import javax.persistence.Enumerated;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.Temporal;
+import javax.persistence.TemporalType;
+
+import com.cloud.utils.DateUtil;
+import org.apache.cloudstack.api.InternalIdentity;
+
/**
 * JPA entity for the "mshost_peer" table: records, per owning management
 * server, the state it last observed for each peer management server, along
 * with the peer's run id and the time of the last update.
 */
@Entity
@Table(name="mshost_peer")
public class ManagementServerHostPeerVO implements InternalIdentity {
    
    @Id
    @GeneratedValue(strategy=GenerationType.IDENTITY)
    @Column(name="id")
    private long id;
    
    // mshost row id of the management server that recorded this observation
    @Column(name="owner_mshost", updatable=true, nullable=false)
    private long ownerMshost;
    
    // mshost row id of the peer being observed
    @Column(name="peer_mshost", updatable=true, nullable=false)
    private long peerMshost;
    
    // peer's run id at observation time (presumably distinguishes restarts of
    // the same msid -- confirm against ManagementServerHostVO.runid usage)
    @Column(name="peer_runid", updatable=true, nullable=false)
    private long peerRunid;

    // last observed peer state, stored by enum name
    @Column(name="peer_state", updatable = true, nullable=false)
    @Enumerated(value=EnumType.STRING)
    private ManagementServerHost.State peerState;
    
    // timestamp of the last refresh; set to current GMT time by the
    // convenience constructor below
    @Temporal(TemporalType.TIMESTAMP)
    @Column(name="last_update", updatable=true, nullable=true)
    private Date lastUpdateTime;

    public ManagementServerHostPeerVO() {
    }
    
    /** Convenience constructor; stamps lastUpdateTime with the current GMT time. */
    public ManagementServerHostPeerVO(long ownerMshost, long peerMshost, long peerRunid, ManagementServerHost.State peerState) {
        this.ownerMshost = ownerMshost;
        this.peerMshost = peerMshost;
        this.peerRunid = peerRunid;
        this.peerState = peerState;
        
        this.lastUpdateTime = DateUtil.currentGMTTime();
    }

    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public long getOwnerMshost() {
        return ownerMshost;
    }

    public void setOwnerMshost(long ownerMshost) {
        this.ownerMshost = ownerMshost;
    }

    public long getPeerMshost() {
        return peerMshost;
    }

    public void setPeerMshost(long peerMshost) {
        this.peerMshost = peerMshost;
    }

    public long getPeerRunid() {
        return peerRunid;
    }

    public void setPeerRunid(long peerRunid) {
        this.peerRunid = peerRunid;
    }

    public ManagementServerHost.State getPeerState() {
        return peerState;
    }

    public void setPeerState(ManagementServerHost.State peerState) {
        this.peerState = peerState;
    }

    public Date getLastUpdateTime() {
        return lastUpdateTime;
    }

    public void setLastUpdateTime(Date lastUpdateTime) {
        this.lastUpdateTime = lastUpdateTime;
    }
}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/ManagementServerHostVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/ManagementServerHostVO.java b/engine/schema/src/com/cloud/cluster/ManagementServerHostVO.java
new file mode 100644
index 0000000..31642e4
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/ManagementServerHostVO.java
@@ -0,0 +1,182 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster;
+
+import java.util.Date;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.EnumType;
+import javax.persistence.Enumerated;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.Temporal;
+import javax.persistence.TemporalType;
+
+import com.cloud.utils.db.GenericDao;
+import org.apache.cloudstack.api.InternalIdentity;
+
+@Entity
+@Table(name="mshost")
+public class ManagementServerHostVO implements ManagementServerHost {
+
+	@Id
+	@GeneratedValue(strategy=GenerationType.IDENTITY)
+	@Column(name="id")
+	private long id;
+
+	@Column(name="msid", updatable=true, nullable=false)
+	private long msid;
+	
+	@Column(name="runid", updatable=true, nullable=false)
+	private long runid;
+
+	@Column(name="name", updatable=true, nullable=true)
+	private String name;
+	
+    @Column(name="state", updatable = true, nullable=false)
+    @Enumerated(value=EnumType.STRING)
+	private ManagementServerHost.State state;
+	
+	@Column(name="version", updatable=true, nullable=true)
+	private String version;
+	
+	@Column(name="service_ip", updatable=true, nullable=false)
+	private String serviceIP;
+	
+	@Column(name="service_port", updatable=true, nullable=false)
+	private int servicePort;
+	
+    @Temporal(TemporalType.TIMESTAMP)
+    @Column(name="last_update", updatable=true, nullable=true)
+    private Date lastUpdateTime;
+    
+    @Column(name=GenericDao.REMOVED_COLUMN)
+    private Date removed;
+    
+	@Column(name="alert_count", updatable=true, nullable=false)
+	private int alertCount;
+
+    public ManagementServerHostVO() {
+    }
+    
+    public ManagementServerHostVO(long msid, long runid, String serviceIP, int servicePort, Date updateTime) {
+    	this.msid = msid;
+    	this.runid = runid;
+    	this.serviceIP = serviceIP;
+    	this.servicePort = servicePort;
+    	this.lastUpdateTime = updateTime;
+    }
+    
+	public long getId() {
+		return id;
+	}
+
+	public void setId(long id) {
+		this.id = id;
+	}
+	
+	public long getRunid() {
+		return runid;
+	}
+	
+	public void setRunid(long runid) {
+		this.runid = runid;
+	}
+
+	@Override
+	public long getMsid() {
+		return msid;
+	}
+
+	public void setMsid(long msid) {
+		this.msid = msid;
+	}
+	
+	public String getName() {
+		return name;
+	}
+	
+	public void setName(String name) {
+		this.name = name;
+	}
+	
+	@Override
+	public ManagementServerHost.State getState() {
+		return this.state;
+	}
+	
+	public void setState(ManagementServerHost.State state) {
+		this.state = state;
+	}
+	
+	@Override
+	public String getVersion() {
+		return version;
+	}
+	
+	public void setVersion(String version) {
+		this.version = version;
+	}
+	
+	public String getServiceIP() {
+		return serviceIP;
+	}
+
+	public void setServiceIP(String serviceIP) {
+		this.serviceIP = serviceIP;
+	}
+
+	public int getServicePort() {
+		return servicePort;
+	}
+
+	public void setServicePort(int servicePort) {
+		this.servicePort = servicePort;
+	}
+
+	public Date getLastUpdateTime() {
+		return lastUpdateTime;
+	}
+
+	public void setLastUpdateTime(Date lastUpdateTime) {
+		this.lastUpdateTime = lastUpdateTime;
+	}
+	
+	public Date getRemoved() {
+		return removed;
+	}
+	
+	public void setRemoved(Date removedTime) {
+		removed = removedTime;
+	}
+	
+	public int getAlertCount() {
+		return alertCount; 
+	}
+	
+	public void setAlertCount(int count) {
+		alertCount = count;
+	}
+	
+	@Override
+    public String toString() {
+        return new StringBuilder("ManagementServer[").append("-").append(id).append("-").append(msid).append("-").append(state).append("]").toString();
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/agentlb/HostTransferMapVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/agentlb/HostTransferMapVO.java b/engine/schema/src/com/cloud/cluster/agentlb/HostTransferMapVO.java
new file mode 100644
index 0000000..9c2edde
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/agentlb/HostTransferMapVO.java
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster.agentlb;
+
+import java.util.Date;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Inheritance;
+import javax.persistence.InheritanceType;
+import javax.persistence.Table;
+
+import com.cloud.utils.db.GenericDao;
+import org.apache.cloudstack.api.InternalIdentity;
+
/**
 * JPA entity for the "op_host_transfer" table: a record that a host (the row
 * id IS the host id) is being handed off from one management server
 * (initial owner) to another (future owner).
 */
@Entity
@Table(name = "op_host_transfer")
@Inheritance(strategy = InheritanceType.TABLE_PER_CLASS)
public class HostTransferMapVO implements InternalIdentity {

    /** Lifecycle of a transfer: requested first, then started. */
    public enum HostTransferState {
        TransferRequested, TransferStarted;
    }

    // Not auto-generated: set to the transferred host's id in the constructor.
    @Id
    @Column(name = "id")
    private long id;

    // Management server currently owning the host.
    @Column(name = "initial_mgmt_server_id")
    private long initialOwner;

    // Management server the host is being transferred to.
    @Column(name = "future_mgmt_server_id")
    private long futureOwner;

    @Column(name = "state")
    private HostTransferState state;
    
    @Column(name=GenericDao.CREATED_COLUMN)
    private Date created;

    /** New transfer record; starts in the TransferRequested state. */
    public HostTransferMapVO(long hostId, long initialOwner, long futureOwner) {
        this.id = hostId;
        this.initialOwner = initialOwner;
        this.futureOwner = futureOwner;
        this.state = HostTransferState.TransferRequested;
    }

    protected HostTransferMapVO() {
    }

    public long getInitialOwner() {
        return initialOwner;
    }

    public long getFutureOwner() {
        return futureOwner;
    }

    public HostTransferState getState() {
        return state;
    }

    public void setInitialOwner(long initialOwner) {
        this.initialOwner = initialOwner;
    }

    public void setFutureOwner(long futureOwner) {
        this.futureOwner = futureOwner;
    }

    public void setState(HostTransferState state) {
        this.state = state;
    }

    public long getId() {
        return id;
    }
    
    public Date getCreated() {
        return created;
    }

}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDao.java b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDao.java
new file mode 100644
index 0000000..6c1f782
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDao.java
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster.agentlb.dao;
+
+import java.util.Date;
+import java.util.List;
+
+import com.cloud.cluster.agentlb.HostTransferMapVO;
+import com.cloud.cluster.agentlb.HostTransferMapVO.HostTransferState;
+import com.cloud.utils.db.GenericDao;
+
+public interface HostTransferMapDao extends GenericDao<HostTransferMapVO, Long> {
+
+    /** Lists transfer records whose current (initial) owner is {@code currentOwnerId}. */
+    List<HostTransferMapVO> listHostsLeavingCluster(long currentOwnerId);
+
+    /** Lists transfer records whose future owner is {@code futureOwnerId}. */
+    List<HostTransferMapVO> listHostsJoiningCluster(long futureOwnerId);
+
+    /** Persists a new transfer record for {@code hostId}, initially in the requested state. */
+    HostTransferMapVO startAgentTransfering(long hostId, long currentOwner, long futureOwner);
+
+    /** Completes a transfer by deleting the record for {@code hostId}. */
+    boolean completeAgentTransfer(long hostId);
+
+    /** Lists transfers to {@code futureOwnerId} that are in the given {@code state}. */
+    List<HostTransferMapVO> listBy(long futureOwnerId, HostTransferState state);
+
+    /** Finds the still-requested transfer for {@code hostId} created after {@code cutTime}. */
+    HostTransferMapVO findActiveHostTransferMapByHostId(long hostId, Date cutTime);
+
+    /** Moves the transfer record for {@code hostId} into the started state. */
+    boolean startAgentTransfer(long hostId);
+
+    /** Finds the transfer with {@code id} whose future owner is {@code futureOwnerId}. */
+    HostTransferMapVO findByIdAndFutureOwnerId(long id, long futureOwnerId);
+
+    /** Finds the transfer with {@code id} whose current (initial) owner is {@code currentOwnerId}. */
+    HostTransferMapVO findByIdAndCurrentOwnerId(long id, long currentOwnerId);
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
new file mode 100644
index 0000000..cff4cfc
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
@@ -0,0 +1,146 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster.agentlb.dao;
+
+import java.util.Date;
+import java.util.List;
+
+import javax.annotation.PostConstruct;
+import javax.ejb.Local;
+
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.cluster.agentlb.HostTransferMapVO;
+import com.cloud.cluster.agentlb.HostTransferMapVO.HostTransferState;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+
+@Component
+@Local(value = { HostTransferMapDao.class })
+@DB(txn = false)
+public class HostTransferMapDaoImpl extends GenericDaoBase<HostTransferMapVO, Long> implements HostTransferMapDao {
+    private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);
+
+    protected SearchBuilder<HostTransferMapVO> AllFieldsSearch;
+    protected SearchBuilder<HostTransferMapVO> IntermediateStateSearch;
+    protected SearchBuilder<HostTransferMapVO> ActiveSearch;
+
+    public HostTransferMapDaoImpl() {
+        super();
+    }
+
+    /**
+     * Builds the reusable search templates. Done post-construction so
+     * createSearchBuilder() runs on the fully wired DAO.
+     */
+    @PostConstruct
+    public void init() {
+        AllFieldsSearch = createSearchBuilder();
+        AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
+        AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
+        AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
+        AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
+        AllFieldsSearch.done();
+
+        IntermediateStateSearch = createSearchBuilder();
+        IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
+        IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
+        IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
+        IntermediateStateSearch.done();
+
+        ActiveSearch = createSearchBuilder();
+        ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
+        ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
+        ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
+        ActiveSearch.done();
+    }
+
+    @Override
+    public List<HostTransferMapVO> listHostsLeavingCluster(long currentOwnerId) {
+        // NOTE(review): the "state" IN condition of IntermediateStateSearch is
+        // never given values here or in listHostsJoiningCluster -- confirm the
+        // framework drops unset conditions rather than matching nothing.
+        SearchCriteria<HostTransferMapVO> sc = IntermediateStateSearch.create();
+        sc.setParameters("initialOwner", currentOwnerId);
+
+        return listBy(sc);
+    }
+
+    @Override
+    public List<HostTransferMapVO> listHostsJoiningCluster(long futureOwnerId) {
+        SearchCriteria<HostTransferMapVO> sc = IntermediateStateSearch.create();
+        sc.setParameters("futureOwner", futureOwnerId);
+
+        return listBy(sc);
+    }
+
+    /** Persists a new transfer record, initially in the TransferRequested state. */
+    @Override
+    public HostTransferMapVO startAgentTransfering(long hostId, long initialOwner, long futureOwner) {
+        HostTransferMapVO transfer = new HostTransferMapVO(hostId, initialOwner, futureOwner);
+        return persist(transfer);
+    }
+
+    /** Completes a transfer by deleting its record. */
+    @Override
+    public boolean completeAgentTransfer(long hostId) {
+        return remove(hostId);
+    }
+
+    @Override
+    public List<HostTransferMapVO> listBy(long futureOwnerId, HostTransferState state) {
+        SearchCriteria<HostTransferMapVO> sc = AllFieldsSearch.create();
+        sc.setParameters("futureOwner", futureOwnerId);
+        sc.setParameters("state", state);
+
+        return listBy(sc);
+    }
+
+    /** Finds a still-requested transfer for {@code hostId} created after {@code cutTime}. */
+    @Override
+    public HostTransferMapVO findActiveHostTransferMapByHostId(long hostId, Date cutTime) {
+        SearchCriteria<HostTransferMapVO> sc = ActiveSearch.create();
+        sc.setParameters("id", hostId);
+        sc.setParameters("state", HostTransferState.TransferRequested);
+        sc.setParameters("created", cutTime);
+
+        return findOneBy(sc);
+    }
+
+    /**
+     * Moves the transfer record for {@code hostId} into the started state.
+     *
+     * @return false when no transfer record exists for the host (the previous
+     *         code dereferenced the null lookup result and threw an NPE)
+     */
+    @Override
+    public boolean startAgentTransfer(long hostId) {
+        HostTransferMapVO transfer = findById(hostId);
+        if (transfer == null) {
+            s_logger.warn("Unable to start agent transfer; no transfer record found for host " + hostId);
+            return false;
+        }
+        transfer.setState(HostTransferState.TransferStarted);
+        return update(hostId, transfer);
+    }
+
+    @Override
+    public HostTransferMapVO findByIdAndFutureOwnerId(long id, long futureOwnerId) {
+        SearchCriteria<HostTransferMapVO> sc = AllFieldsSearch.create();
+        sc.setParameters("futureOwner", futureOwnerId);
+        sc.setParameters("id", id);
+
+        return findOneBy(sc);
+    }
+
+    @Override
+    public HostTransferMapVO findByIdAndCurrentOwnerId(long id, long currentOwnerId) {
+        SearchCriteria<HostTransferMapVO> sc = AllFieldsSearch.create();
+        sc.setParameters("initialOwner", currentOwnerId);
+        sc.setParameters("id", id);
+
+        return findOneBy(sc);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDao.java b/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDao.java
new file mode 100644
index 0000000..fec0679
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDao.java
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster.dao;
+
+import java.util.Date;
+import java.util.List;
+
+import com.cloud.cluster.ManagementServerHost;
+import com.cloud.cluster.ManagementServerHost.State;
+import com.cloud.cluster.ManagementServerHostVO;
+import com.cloud.utils.db.Filter;
+import com.cloud.utils.db.GenericDao;
+
+public interface ManagementServerHostDao extends GenericDao<ManagementServerHostVO, Long> {
+
+    /** Removes the record for management server {@code id}. */
+    @Override
+    boolean remove(Long id);
+
+    /** Finds a record by its cluster-wide msid; removed rows are included in the lookup. */
+    ManagementServerHostVO findByMsid(long msid);
+
+    /** Bumps alert_count from 0 to 1; returns the number of rows changed. */
+    int increaseAlertCount(long id);
+
+    /** Re-registers a management server: refreshes its identity fields and marks it Up. */
+    void update(long id, long runid, String name, String version, String serviceIP, int servicePort, Date lastUpdate);
+
+    /** Heartbeat: refreshes last_update for the row matching {@code id} and {@code runid}. */
+    void update(long id, long runid, Date lastUpdate);
+
+    /** Lists rows whose last_update is after {@code cutTime}. */
+    List<ManagementServerHostVO> getActiveList(Date cutTime);
+
+    /** Lists rows whose last_update is at or before {@code cutTime}. */
+    List<ManagementServerHostVO> getInactiveList(Date cutTime);
+
+    /** Clears the runid and marks the row Down when the given run session is stale. */
+    void invalidateRunSession(long id, long runid);
+
+    /** Updates state and last_update for the row matching {@code id} and {@code runId}. */
+    void update(long id, long runId, State state, Date lastUpdate);
+
+    /** Lists rows whose state is one of {@code states}. */
+    List<ManagementServerHostVO> listBy(ManagementServerHost.State... states);
+
+    /** Lists mgmt_server_ids still referenced by hosts but absent from the mshost table. */
+    List<Long> listOrphanMsids();
+
+    /** Returns the first row in Up state according to {@code filter}, or null. */
+    ManagementServerHostVO findOneInUpState(Filter filter);
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java b/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java
new file mode 100644
index 0000000..3866da1
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java
@@ -0,0 +1,276 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster.dao;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.TimeZone;
+
+import javax.ejb.Local;
+
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.cluster.ClusterInvalidSessionException;
+import com.cloud.cluster.ManagementServerHost;
+import com.cloud.cluster.ManagementServerHost.State;
+import com.cloud.cluster.ManagementServerHostVO;
+import com.cloud.utils.DateUtil;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Filter;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@Component
+@Local(value = { ManagementServerHostDao.class })
+public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServerHostVO, Long> implements ManagementServerHostDao {
+    private static final Logger s_logger = Logger.getLogger(ManagementServerHostDaoImpl.class);
+
+    private final SearchBuilder<ManagementServerHostVO> MsIdSearch;
+    private final SearchBuilder<ManagementServerHostVO> ActiveSearch;
+    private final SearchBuilder<ManagementServerHostVO> InactiveSearch;
+    private final SearchBuilder<ManagementServerHostVO> StateSearch;
+
+    protected ManagementServerHostDaoImpl() {
+        MsIdSearch = createSearchBuilder();
+        MsIdSearch.and("msid", MsIdSearch.entity().getMsid(), SearchCriteria.Op.EQ);
+        MsIdSearch.done();
+
+        // "Active" rows are the non-removed rows heartbeated after a cut-off time.
+        ActiveSearch = createSearchBuilder();
+        ActiveSearch.and("lastUpdateTime", ActiveSearch.entity().getLastUpdateTime(), SearchCriteria.Op.GT);
+        ActiveSearch.and("removed", ActiveSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
+        ActiveSearch.done();
+
+        InactiveSearch = createSearchBuilder();
+        InactiveSearch.and("lastUpdateTime", InactiveSearch.entity().getLastUpdateTime(), SearchCriteria.Op.LTEQ);
+        InactiveSearch.and("removed", InactiveSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
+        InactiveSearch.done();
+
+        StateSearch = createSearchBuilder();
+        StateSearch.and("state", StateSearch.entity().getState(), SearchCriteria.Op.IN);
+        StateSearch.done();
+    }
+
+    /**
+     * Clears the runid and marks the row Down, but only if the row still
+     * carries the expected {@code runid} (i.e. the session being invalidated
+     * is still the current one).
+     */
+    @Override
+    public void invalidateRunSession(long id, long runid) {
+        final String sql = "update mshost set runid=0, state='Down' where id=? and runid=?";
+        Transaction txn = Transaction.currentTxn();
+        try {
+            PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql);
+            pstmt.setLong(1, id);
+            pstmt.setLong(2, runid);
+
+            pstmt.executeUpdate();
+        } catch (SQLException e) {
+            // Report the SQL text: the previous code dereferenced pstmt here,
+            // which is null when prepare itself failed.
+            throw new CloudRuntimeException("DB exception on " + sql, e);
+        }
+    }
+
+    @Override
+    public ManagementServerHostVO findByMsid(long msid) {
+        SearchCriteria<ManagementServerHostVO> sc = MsIdSearch.create();
+        sc.setParameters("msid", msid);
+
+        // Removed rows are included deliberately -- presumably so a restarting
+        // server can find and revive its old record (update() clears 'removed').
+        List<ManagementServerHostVO> l = listIncludingRemovedBy(sc);
+        if (l != null && !l.isEmpty()) {
+            return l.get(0);
+        }
+
+        return null;
+    }
+
+    /**
+     * Re-registers a management server: refreshes its identity fields, clears
+     * 'removed' and the alert count, and marks it Up under the new runid.
+     */
+    @Override
+    @DB
+    public void update(long id, long runid, String name, String version, String serviceIP, int servicePort, Date lastUpdate) {
+        Transaction txn = Transaction.currentTxn();
+        try {
+            txn.start();
+
+            PreparedStatement pstmt = txn.prepareAutoCloseStatement(
+                "update mshost set name=?, version=?, service_ip=?, service_port=?, last_update=?, removed=null, alert_count=0, runid=?, state=? where id=?");
+            pstmt.setString(1, name);
+            pstmt.setString(2, version);
+            pstmt.setString(3, serviceIP);
+            pstmt.setInt(4, servicePort);
+            pstmt.setString(5, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
+            pstmt.setLong(6, runid);
+            pstmt.setString(7, ManagementServerHost.State.Up.toString());
+            pstmt.setLong(8, id);
+
+            pstmt.executeUpdate();
+            txn.commit();
+        } catch (Exception e) {
+            s_logger.warn("Unexpected exception, ", e);
+            // Roll back explicitly on failure, matching increaseAlertCount().
+            txn.rollback();
+        }
+    }
+
+    @Override
+    @DB
+    public boolean remove(Long id) {
+        Transaction txn = Transaction.currentTxn();
+
+        try {
+            txn.start();
+
+            ManagementServerHostVO msHost = findById(id);
+            if (msHost == null) {
+                // Nothing to remove; the previous code would have hit an NPE here.
+                txn.commit();
+                return false;
+            }
+            // NOTE(review): this state change is applied only to the in-memory
+            // object and is never persisted before the row is removed --
+            // confirm whether an update(id, msHost) was intended first.
+            msHost.setState(ManagementServerHost.State.Down);
+            super.remove(id);
+
+            txn.commit();
+            return true;
+        } catch (Exception e) {
+            s_logger.warn("Unexpected exception, ", e);
+            txn.rollback();
+        }
+
+        return false;
+    }
+
+    /**
+     * Heartbeat: refreshes last_update and clears 'removed'/alert_count for the
+     * row matching (id, runid).
+     *
+     * @throws CloudRuntimeException (wrapping ClusterInvalidSessionException)
+     *         when no row matched, i.e. {@code runid} is no longer current
+     */
+    @Override
+    @DB
+    public void update(long id, long runid, Date lastUpdate) {
+        Transaction txn = Transaction.currentTxn();
+        try {
+            txn.start();
+
+            PreparedStatement pstmt = txn.prepareAutoCloseStatement("update mshost set last_update=?, removed=null, alert_count=0 where id=? and runid=?");
+            pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
+            pstmt.setLong(2, id);
+            pstmt.setLong(3, runid);
+
+            int count = pstmt.executeUpdate();
+            txn.commit();
+
+            if (count < 1) {
+                throw new CloudRuntimeException("Invalid cluster session detected", new ClusterInvalidSessionException("runid " + runid + " is no longer valid"));
+            }
+        } catch (SQLException e) {
+            // Narrowed from catch(Exception): the previous code caught and
+            // logged the invalid-session exception it had just thrown above,
+            // silently hiding stale cluster sessions from the caller.
+            s_logger.warn("Unexpected exception, ", e);
+        }
+    }
+
+    @Override
+    public List<ManagementServerHostVO> getActiveList(Date cutTime) {
+        SearchCriteria<ManagementServerHostVO> sc = ActiveSearch.create();
+        sc.setParameters("lastUpdateTime", cutTime);
+
+        return listIncludingRemovedBy(sc);
+    }
+
+    @Override
+    public List<ManagementServerHostVO> getInactiveList(Date cutTime) {
+        SearchCriteria<ManagementServerHostVO> sc = InactiveSearch.create();
+        sc.setParameters("lastUpdateTime", cutTime);
+
+        return listIncludingRemovedBy(sc);
+    }
+
+    /**
+     * Bumps alert_count from 0 to 1 (at most once until it is reset elsewhere).
+     *
+     * @return the number of rows changed: 1 the first time, 0 afterwards
+     */
+    @Override
+    @DB
+    public int increaseAlertCount(long id) {
+        Transaction txn = Transaction.currentTxn();
+        int changedRows = 0;
+        try {
+            txn.start();
+
+            PreparedStatement pstmt = txn.prepareAutoCloseStatement("update mshost set alert_count=alert_count+1 where id=? and alert_count=0");
+            pstmt.setLong(1, id);
+
+            changedRows = pstmt.executeUpdate();
+            txn.commit();
+        } catch (Exception e) {
+            s_logger.warn("Unexpected exception, ", e);
+            txn.rollback();
+        }
+
+        return changedRows;
+    }
+
+    /**
+     * Updates state and last_update for the row matching (id, runId).
+     *
+     * @throws CloudRuntimeException when no row matched (stale runid) or on a
+     *         database error
+     */
+    @Override
+    public void update(long id, long runId, State state, Date lastUpdate) {
+        final String sql = "update mshost set state=?, last_update=? where id=? and runid=?";
+        Transaction txn = Transaction.currentTxn();
+        try {
+            PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql);
+            pstmt.setString(1, state.toString());
+            pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
+            pstmt.setLong(3, id);
+            pstmt.setLong(4, runId);
+
+            int count = pstmt.executeUpdate();
+
+            if (count < 1) {
+                throw new CloudRuntimeException("Invalid cluster session detected", new ClusterInvalidSessionException("runid " + runId + " is no longer valid"));
+            }
+        } catch (SQLException e) {
+            // Report the SQL text: pstmt is null when prepare itself failed.
+            throw new CloudRuntimeException("DB exception on " + sql, e);
+        }
+    }
+
+    @Override
+    public List<ManagementServerHostVO> listBy(ManagementServerHost.State... states) {
+        SearchCriteria<ManagementServerHostVO> sc = StateSearch.create();
+
+        sc.setParameters("state", (Object[]) states);
+
+        return listBy(sc);
+    }
+
+    /**
+     * Lists mgmt_server_ids that hosts still reference but that have no mshost
+     * row, i.e. servers that vanished without releasing their hosts.
+     */
+    @Override
+    public List<Long> listOrphanMsids() {
+        final String sql =
+            "select t.mgmt_server_id from (select mgmt_server_id, count(*) as count from host group by mgmt_server_id) as t WHERE t.count > 0 AND t.mgmt_server_id NOT IN (select msid from mshost)";
+        List<Long> orphanList = new ArrayList<Long>();
+
+        Transaction txn = Transaction.currentTxn();
+        try {
+            PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql);
+
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                orphanList.add(rs.getLong(1));
+            }
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB exception on " + sql, e);
+        }
+
+        return orphanList;
+    }
+
+    /** Returns the first management server in Up state per {@code filter}, or null. */
+    @Override
+    public ManagementServerHostVO findOneInUpState(Filter filter) {
+        SearchCriteria<ManagementServerHostVO> sc = StateSearch.create();
+
+        sc.setParameters("state", ManagementServerHost.State.Up);
+
+        List<ManagementServerHostVO> mshosts = listBy(sc, filter);
+        if (mshosts != null && !mshosts.isEmpty()) {
+            return mshosts.get(0);
+        }
+        return null;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c11dbad9/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostPeerDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostPeerDao.java b/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostPeerDao.java
new file mode 100644
index 0000000..14c872a
--- /dev/null
+++ b/engine/schema/src/com/cloud/cluster/dao/ManagementServerHostPeerDao.java
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.cluster.dao;
+
+import com.cloud.cluster.ManagementServerHost;
+import com.cloud.cluster.ManagementServerHostPeerVO;
+import com.cloud.utils.db.GenericDao;
+
+public interface ManagementServerHostPeerDao extends GenericDao<ManagementServerHostPeerVO, Long> {
+    void clearPeerInfo(long ownerMshost);
+    void updatePeerInfo(long ownerMshost, long peerMshost, long peerRunid, ManagementServerHost.State peerState);
+    int countStateSeenInPeers(long mshost, long runid, ManagementServerHost.State state);
+}


Mime
View raw message