cloudstack-commits mailing list archives

From kis...@apache.org
Subject git commit: updated refs/heads/4.2 to 6e9bf56
Date Tue, 30 Jul 2013 16:59:03 GMT
Updated Branches:
  refs/heads/4.2 a5054c1cd -> 6e9bf56cf


Fix for incorrect capacity displayed in the db.

Conflicts:
	engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
	server/src/com/cloud/alert/AlertManagerImpl.java
	server/src/com/cloud/api/ApiResponseHelper.java


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/6e9bf56c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/6e9bf56c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/6e9bf56c

Branch: refs/heads/4.2
Commit: 6e9bf56cf195f990cf4b37556d0f5e796de2238a
Parents: a5054c1
Author: Bharat Kumar <bharat.kumar@citrix.com>
Authored: Fri Jul 26 12:16:15 2013 +0530
Committer: Kishan Kavala <kishan@cloud.com>
Committed: Tue Jul 30 22:13:44 2013 +0530

----------------------------------------------------------------------
 .../com/cloud/capacity/dao/CapacityDaoImpl.java | 180 +++++++++++--------
 .../src/com/cloud/alert/AlertManagerImpl.java   |   3 +-
 server/src/com/cloud/api/ApiResponseHelper.java |  18 +-
 .../com/cloud/server/ManagementServerImpl.java  |   4 -
 4 files changed, 110 insertions(+), 95 deletions(-)
----------------------------------------------------------------------
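The change replaces the global cpu.overprovisioning.factor with the per-cluster ratios stored in `cloud`.`cluster_details` (cpuOvercommitRatio, memoryOvercommitRatio) when summing capacity, so the totals written to and read from the db already reflect overcommit. A minimal sketch of that effective-total rule, using illustrative names that are not part of this patch:

    // Sketch only: capacity type 1 = CPU, 0 = memory, matching the CASE expressions in the SQL below.
    // cpuRatio/memRatio stand in for the cluster_details values; other capacity types are returned unscaled.
    class EffectiveCapacitySketch {
        static long effectiveTotal(long totalCapacity, short capacityType, float cpuRatio, float memRatio) {
            if (capacityType == 1) {
                return (long) (totalCapacity * cpuRatio);     // CPU scaled by cpuOvercommitRatio
            } else if (capacityType == 0) {
                return (long) (totalCapacity * memRatio);     // memory scaled by memoryOvercommitRatio
            }
            return totalCapacity;                             // storage and other types unchanged
        }
    }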


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6e9bf56c/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
index dc2899f..88a2b2b 100755
--- a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
+++ b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
@@ -27,6 +27,8 @@ import java.util.Map;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.log4j.Logger;
@@ -63,6 +65,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
     private final SearchBuilder<CapacityVO> _hostOrPoolIdSearch;
     private final SearchBuilder<CapacityVO> _allFieldsSearch;
     @Inject protected PrimaryDataStoreDao _storagePoolDao;
+    @Inject protected ClusterDetailsDao _clusterDetailsDao;
 
 
     private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = " SELECT  host_capacity.host_id FROM (`cloud`.`host` JOIN `cloud`.`op_host_capacity` host_capacity ON (host.id = host_capacity.host_id AND host.cluster_id = ?) JOIN `cloud`.`cluster_details` cluster_details ON (host_capacity.cluster_id = cluster_details.cluster_id) AND  host.type = ? AND cluster_details.name='cpuOvercommitRatio' AND ((host_capacity.total_capacity *cluster_details.value ) - host_capacity.used_capacity) >= ? and host_capacity.capacity_type = '1' " +
@@ -76,7 +79,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
 
     private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2= " AND capacity_type = ?  AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";
 
-    private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id  FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
+    private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
                                                           " ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster ON (capacity.cluster_id = cluster.cluster_id ) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name= ? ((total_capacity * cluster.value ) - used_capacity + reserved_capacity) >= ? ";
 
     private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = " SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity) FROM `cloud`.`op_host_capacity` capacity WHERE data_center_id= ? AND capacity_type = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC ";
@@ -90,13 +93,15 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
                                                                   "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+
                                                                   "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) ";
 
-    private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
-                                                                         "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
-                                                                         " capacity.capacity_type, capacity.data_center_id "+
-                                                                         "FROM `cloud`.`op_host_capacity` capacity "+
-                                                                         "WHERE  total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
+    private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT sum(capacity.used_capacity), sum(capacity.reserved_capacity)," +
+            " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))" +
+            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))" +
+            "else sum(total_capacity) end)," +
+            "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))" + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name='memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," +
+            "capacity.capacity_type, capacity.data_center_id FROM `cloud`.`op_host_capacity` capacity WHERE  total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
+
     private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2 = " GROUP BY data_center_id, capacity_type order by percent desc limit ";
-    private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 =  "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," +
+    private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 =  "SELECT sum(capacity.used_capacity), sum(capacity.reserved_capacity)," +
             " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
             "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
             "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
@@ -105,7 +110,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
 
     private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2 = " GROUP BY pod_id, capacity_type order by percent desc limit ";
 
-    private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," +
+    private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT sum(capacity.used_capacity), sum(capacity.reserved_capacity)," +
             " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
             "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
             "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
@@ -116,6 +121,15 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
     private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " GROUP BY cluster_id, capacity_type order by percent desc limit ";
     private static final String UPDATE_CAPACITY_STATE = "UPDATE `cloud`.`op_host_capacity` SET capacity_state = ? WHERE ";
 
+    private static final String LIST_CAPACITY_GROUP_BY_CAPACITY_PART1= "SELECT sum(capacity.used_capacity), sum(capacity.reserved_capacity)," +
+            " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
+            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
+            "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
+            "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) else sum(total_capacity) end)) percent," +
+            "capacity.capacity_type, capacity.data_center_id, pod_id FROM `cloud`.`op_host_capacity` capacity WHERE  total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' ";
+
+    private static final String LIST_CAPACITY_GROUP_BY_CAPACITY_PART2 = " GROUP BY capacity_type";
+
     /* In the below query"LIST_CLUSTERS_CROSSING_THRESHOLD" the threshold value is getting from the cluster_details table if not present then it gets from the global configuration
     *
     * CASE statement works like
@@ -346,8 +360,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
                 if(level == 3 && rs.getLong(7) != 0)
                     capacityClusterId = rs.getLong(7);
 
-                SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getFloat(3),
-                        (short)rs.getLong(4), rs.getLong(5),
+                SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(3), rs.getFloat(4),
+                        (short)rs.getLong(5), rs.getLong(6),
                         capacityPodId, capacityClusterId);
 
                 result.add(summedCapacity);
@@ -364,53 +378,50 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
     @Override
     public  List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId){
 
-        GenericSearchBuilder<CapacityVO, SummedCapacity> SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
-        SummedCapacitySearch.select("dcId", Func.NATIVE, SummedCapacitySearch.entity().getDataCenterId());
-        SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
-        SummedCapacitySearch.select("sumReserved", Func.SUM, SummedCapacitySearch.entity().getReservedCapacity());
-        SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());
-        SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<SummedCapacity> result = new ArrayList<SummedCapacity>();
 
-        if (zoneId==null && podId==null && clusterId==null){ // List all the capacities grouped by zone, capacity Type
-            SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getDataCenterId(), SummedCapacitySearch.entity().getCapacityType());
-        }else {
-            SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getCapacityType());
-        }
+        StringBuilder sql = new StringBuilder(LIST_CAPACITY_GROUP_BY_CAPACITY_PART1);
+        List<Long> resourceIdList = new ArrayList<Long>();
 
         if (zoneId != null){
-            SummedCapacitySearch.and("dcId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+            sql.append(" AND capacity.data_center_id = ?");
+            resourceIdList.add(zoneId);
         }
         if (podId != null){
-            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+            sql.append(" AND capacity.pod_id = ?");
+            resourceIdList.add(podId);
         }
         if (clusterId != null){
-            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+            sql.append(" AND capacity.cluster_id = ?");
+            resourceIdList.add(clusterId);
+        }
+        if (capacityType != null) {
+            sql.append(" AND capacity.capacity_type = ?");
+            resourceIdList.add(capacityType.longValue());
         }
-        if (capacityType != null){
-            SummedCapacitySearch.and("capacityType", SummedCapacitySearch.entity().getCapacityType(), Op.EQ);
-        }        
-
-        SummedCapacitySearch.done();
 
+        sql.append(LIST_CAPACITY_GROUP_BY_CAPACITY_PART2);
 
-        SearchCriteria<SummedCapacity> sc = SummedCapacitySearch.create();
-        if (zoneId != null){
-            sc.setParameters("dcId", zoneId);
-        }
-        if (podId != null){
-            sc.setParameters("podId", podId);
-        }
-        if (clusterId != null){
-            sc.setParameters("clusterId", clusterId);
-        }
-        if (capacityType != null){
-            sc.setParameters("capacityType", capacityType);
-        }
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
 
-        Filter filter = new Filter(CapacityVO.class, null, true, null, null);
-        List<SummedCapacity> results = customSearchIncludingRemoved(sc, filter);
-        return results;        
 
+            for (int i = 0; i < resourceIdList.size(); i++){
+                pstmt.setLong(i+1, resourceIdList.get(i));
+            }
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                SummedCapacity summedCapacity = new SummedCapacity(rs.getLong(1), rs.getLong(2), rs.getLong(3), (short)rs.getLong(5), null, null, rs.getLong(6));
+                result.add(summedCapacity);
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
     }
 
     public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
@@ -579,39 +590,49 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
             return percentUsed;
         }
     }
+
     @Override
     public List<SummedCapacity> findByClusterPodZone(Long zoneId, Long podId, Long clusterId){
 
-        GenericSearchBuilder<CapacityVO, SummedCapacity> SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
-        SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
-        SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());
-        SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());
-        SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getCapacityType());
-
-        if(zoneId != null){
-            SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
-        }
-        if (podId != null){
-            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
-        }
-        if (clusterId != null){
-            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
-        }
-        SummedCapacitySearch.done();
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List<SummedCapacity> result = new ArrayList<SummedCapacity>();
 
+        StringBuilder sql = new StringBuilder(LIST_CAPACITY_GROUP_BY_CAPACITY_PART1);
+        List<Long> resourceIdList = new ArrayList<Long>();
 
-        SearchCriteria<SummedCapacity> sc = SummedCapacitySearch.create();
         if (zoneId != null){
-            sc.setParameters("zoneId", zoneId);
+            sql.append(" AND capacity.data_center_id = ?");
+            resourceIdList.add(zoneId);
         }
         if (podId != null){
-            sc.setParameters("podId", podId);
+            sql.append(" AND capacity.pod_id = ?");
+            resourceIdList.add(podId);
         }
         if (clusterId != null){
-            sc.setParameters("clusterId", clusterId);
+            sql.append(" AND capacity.cluster_id = ?");
+            resourceIdList.add(clusterId);
         }
+        sql.append(LIST_CAPACITY_GROUP_BY_CAPACITY_PART2);
 
-        return customSearchIncludingRemoved(sc, null);         
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+
+
+            for (int i = 0; i < resourceIdList.size(); i++){
+                pstmt.setLong(i+1, resourceIdList.get(i));
+            }
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                SummedCapacity summedCapacity = new SummedCapacity(rs.getLong(1), rs.getLong(2), rs.getLong(3), (short)rs.getLong(5), null, null, rs.getLong(6));
+                result.add(summedCapacity);
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
     }
 
     @Override
@@ -776,16 +797,27 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
         PreparedStatement pstmt = null;
         List<Long> result = new ArrayList<Long>();
         Map<Long, Double> podCapacityMap = new HashMap<Long, Double>();
-        
-        StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY);
+        StringBuilder sql = null;
         try {
-            pstmt = txn.prepareAutoCloseStatement(sql.toString());
-            pstmt.setLong(2, zoneId);
-            pstmt.setShort(3, capacityTypeForOrdering);
-            
-            if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
-                pstmt.setString(3, "cpuOvercommitRatio");
+            if (capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU | capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_MEMORY) {
+                sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_OVERCOMMIT_CAPACITY);
+                pstmt = txn.prepareAutoCloseStatement(sql.toString());
+                pstmt.setLong(1, zoneId);
+                pstmt.setShort(2, capacityTypeForOrdering);
+
+                if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
+                    pstmt.setString(3, "cpuOvercommitRatio");
+                }
+                else if (capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_MEMORY) {
+                    pstmt.setString(3,"memoryOvercommitRatio");
+                }
+            }else {
+                sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY);
+                pstmt = txn.prepareAutoCloseStatement(sql.toString());
+                pstmt.setLong(1, zoneId);
+                pstmt.setShort(2,capacityTypeForOrdering);
             }
+
             
             ResultSet rs = pstmt.executeQuery();
             while (rs.next()) {

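In the DAO diff above, findCapacityBy() and findByClusterPodZone() now build their query from LIST_CAPACITY_GROUP_BY_CAPACITY_PART1/PART2, append one predicate per non-null filter, and bind the collected ids positionally. A condensed sketch of that filter-then-bind pattern, using plain JDBC instead of CloudStack's Transaction helper and omitting the overcommit CASE expression for brevity; everything outside the table and column names is illustrative:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.util.ArrayList;
    import java.util.List;

    class CapacityQuerySketch {
        // Returns one {used, reserved, total, capacityType} row per capacity type.
        List<long[]> findCapacity(Connection conn, Long zoneId, Long podId, Long clusterId, Integer capacityType) throws SQLException {
            StringBuilder sql = new StringBuilder(
                    "SELECT sum(used_capacity), sum(reserved_capacity), sum(total_capacity), capacity_type "
                    + "FROM `cloud`.`op_host_capacity` WHERE total_capacity > 0 AND capacity_state = 'Enabled'");
            List<Long> params = new ArrayList<Long>();
            if (zoneId != null)       { sql.append(" AND data_center_id = ?"); params.add(zoneId); }
            if (podId != null)        { sql.append(" AND pod_id = ?");         params.add(podId); }
            if (clusterId != null)    { sql.append(" AND cluster_id = ?");     params.add(clusterId); }
            if (capacityType != null) { sql.append(" AND capacity_type = ?");  params.add(capacityType.longValue()); }
            sql.append(" GROUP BY capacity_type");

            PreparedStatement pstmt = conn.prepareStatement(sql.toString());
            for (int i = 0; i < params.size(); i++) {
                pstmt.setLong(i + 1, params.get(i));   // bind in the order the predicates were appended
            }
            List<long[]> rows = new ArrayList<long[]>();
            ResultSet rs = pstmt.executeQuery();
            while (rs.next()) {
                rows.add(new long[] { rs.getLong(1), rs.getLong(2), rs.getLong(3), rs.getLong(4) });
            }
            return rows;
        }
    }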
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6e9bf56c/server/src/com/cloud/alert/AlertManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java
index bff36c7..c437a8c 100755
--- a/server/src/com/cloud/alert/AlertManagerImpl.java
+++ b/server/src/com/cloud/alert/AlertManagerImpl.java
@@ -573,7 +573,6 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager {
         for( ClusterVO cluster : clusterList){
             for (Short capacityType : clusterCapacityTypes){
                 List<SummedCapacity> capacity = new ArrayList<SummedCapacity>();
-                float overProvFactor = getOverProvisioningFactor(cluster.getId(), capacityType);
                 capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId());
 
                 // cpu and memory allocated capacity notification threshold can be defined at cluster level, so getting the value if they are defined at cluster level
@@ -599,7 +598,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager {
                     continue;
                 }
 
-                double totalCapacity = capacity.get(0).getTotalCapacity() * overProvFactor;
+                double totalCapacity = capacity.get(0).getTotalCapacity();
                 double usedCapacity =  capacity.get(0).getUsedCapacity() + capacity.get(0).getReservedCapacity();
                 if (totalCapacity != 0 && usedCapacity/totalCapacity > threshold){
                     generateEmailAlert(ApiDBUtils.findZoneById(cluster.getDataCenterId()), ApiDBUtils.findPodById(cluster.getPodId()), cluster,

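Because the DAO now folds the cluster overcommit ratio into the summed total, the alert path above no longer multiplies by an over-provisioning factor; it compares usage against the already-adjusted total. A minimal sketch of that comparison, with illustrative names rather than the manager's actual fields:

    // Sketch of the threshold check after this change: totalCapacity arrives overcommit-adjusted,
    // so no extra factor is applied here. threshold is a fraction, e.g. 0.75 for a 75% alert level.
    class CapacityAlertSketch {
        static boolean crossesThreshold(double usedCapacity, double reservedCapacity, double totalCapacity, double threshold) {
            if (totalCapacity == 0) {
                return false;                      // mirrors the totalCapacity != 0 guard in the patch
            }
            return (usedCapacity + reservedCapacity) / totalCapacity > threshold;
        }
    }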
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6e9bf56c/server/src/com/cloud/api/ApiResponseHelper.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java
index e331bef..307e6bb 100755
--- a/server/src/com/cloud/api/ApiResponseHelper.java
+++ b/server/src/com/cloud/api/ApiResponseHelper.java
@@ -827,9 +827,7 @@ public class ApiResponseHelper implements ResponseGenerator {
                 CapacityResponse capacityResponse = new CapacityResponse();
                 capacityResponse.setCapacityType(capacity.getCapacityType());
                 capacityResponse.setCapacityUsed(capacity.getUsedCapacity());
-                if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) {
-                    capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity())));
-                } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
+                if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
                     List<SummedCapacity> c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, pod.getId(), null);
                     capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity());
                     capacityResponse.setCapacityUsed(capacity.getUsedCapacity() - c.get(0).getUsedCapacity());
@@ -867,9 +865,7 @@ public class ApiResponseHelper implements ResponseGenerator {
             CapacityResponse capacityResponse = new CapacityResponse();
             capacityResponse.setCapacityType(capacity.getCapacityType());
             capacityResponse.setCapacityUsed(capacity.getUsedCapacity());
-            if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) {
-                capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * cpuOverprovisioningFactor)));
-            } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
+            if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
                 List<SummedCapacity> c = ApiDBUtils.findNonSharedStorageForClusterPodZone(zoneId, null, null);
                 capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity());
                 capacityResponse.setCapacityUsed(capacity.getUsedCapacity() - c.get(0).getUsedCapacity());
@@ -991,15 +987,7 @@ public class ApiResponseHelper implements ResponseGenerator {
                 capacityResponse.setCapacityType(capacity.getCapacityType());
                 capacityResponse.setCapacityUsed(capacity.getUsedCapacity());
 
-                if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) {
-                    if (cpuOvercommitRatio != null) {
-                        capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(cpuOvercommitRatio))));
-                    }
-                } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_MEMORY) {
-                    if (memoryOvercommitRatio != null) {
-                        capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(memoryOvercommitRatio))));
-                    }
-                } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
+                if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
                     List<SummedCapacity> c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, null, cluster.getId());
                     capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity());
                     capacityResponse.setCapacityUsed(capacity.getUsedCapacity() - c.get(0).getUsedCapacity());

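The CPU and memory branches dropped from ApiResponseHelper re-applied an overcommit factor that the DAO totals now already include; only the storage-allocated branch still subtracts non-shared storage from the reported totals. A small sketch of that remaining adjustment, with hypothetical names standing in for the response object:

    // Sketch only: subtract the non-shared (local) storage sums returned by a
    // findNonSharedStorageForClusterPodZone-style lookup from the raw totals.
    class SharedStorageCapacitySketch {
        static long[] adjust(long totalCapacity, long usedCapacity, long nonSharedTotal, long nonSharedUsed) {
            return new long[] { totalCapacity - nonSharedTotal, usedCapacity - nonSharedUsed };
        }
    }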
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6e9bf56c/server/src/com/cloud/server/ManagementServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java
index d573073..26efa10 100755
--- a/server/src/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/com/cloud/server/ManagementServerImpl.java
@@ -2380,10 +2380,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         for (SummedCapacity summedCapacity : summedCapacities) {
             CapacityVO capacity = new CapacityVO(null, summedCapacity.getDataCenterId(), podId, clusterId, summedCapacity.getUsedCapacity()
                     + summedCapacity.getReservedCapacity(), summedCapacity.getTotalCapacity(), summedCapacity.getCapacityType());
-
-            if (summedCapacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) {
-                capacity.setTotalCapacity((long) (summedCapacity.getTotalCapacity() * ApiDBUtils.getCpuOverprovisioningFactor()));
-            }
             capacities.add(capacity);
         }
 

