cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bfede...@apache.org
Subject [04/24] CLOUDSTACK-2056: DeploymentPlanner choice via ServiceOffering
Date Fri, 17 May 2013 17:14:01 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/deploy/FirstFitPlanner.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java
index e8504a9..caf8c6e 100755
--- a/server/src/com/cloud/deploy/FirstFitPlanner.java
+++ b/server/src/com/cloud/deploy/FirstFitPlanner.java
@@ -49,6 +49,7 @@ import com.cloud.dc.Pod;
 import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.exception.InsufficientServerCapacityException;
 import com.cloud.host.Host;
 import com.cloud.host.HostVO;
@@ -81,7 +82,7 @@ import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDao;
 
 @Local(value=DeploymentPlanner.class)
-public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
+public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner {
     private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class);
     @Inject protected HostDao _hostDao;
     @Inject protected DataCenterDao _dcDao;
@@ -103,28 +104,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
     @Inject DataStoreManager dataStoreMgr;
     @Inject protected ClusterDetailsDao _clusterDetailsDao;
 
-    protected List<StoragePoolAllocator> _storagePoolAllocators;
-    public List<StoragePoolAllocator> getStoragePoolAllocators() {
-		return _storagePoolAllocators;
-	}
-	public void setStoragePoolAllocators(
-			List<StoragePoolAllocator> _storagePoolAllocators) {
-		this._storagePoolAllocators = _storagePoolAllocators;
-	}
-
-	protected List<HostAllocator> _hostAllocators;
-    public List<HostAllocator> getHostAllocators() {
-		return _hostAllocators;
-	}
-	public void setHostAllocators(List<HostAllocator> _hostAllocators) {
-		this._hostAllocators = _hostAllocators;
-	}
 
 	protected String _allocationAlgorithm = "random";
+    protected String _globalDeploymentPlanner = "FirstFitPlanner";
 
 
     @Override
-    public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
+    public List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
             DeploymentPlan plan, ExcludeList avoid)
                     throws InsufficientServerCapacityException {
         VirtualMachine vm = vmProfile.getVirtualMachine();
@@ -138,136 +124,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
             return null;
         }
 
-        ServiceOffering offering = vmProfile.getServiceOffering();
-        int cpu_requested = offering.getCpu() * offering.getSpeed();
-        long ram_requested = offering.getRamSize() * 1024L * 1024L;
-
-
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm);
-
-            s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
-                    ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
-
-            s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No"));
-        }
-
-        String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
-
-        if(plan.getHostId() != null && haVmTag == null){
-            Long hostIdSpecified = plan.getHostId();
-            if (s_logger.isDebugEnabled()){
-                s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: "
-                        + hostIdSpecified);
-            }
-            HostVO host = _hostDao.findById(hostIdSpecified);
-            if (host == null) {
-                s_logger.debug("The specified host cannot be found");
-            } else if (avoid.shouldAvoid(host)) {
-                s_logger.debug("The specified host is in avoid set");
-            } else {
-                if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
-                }
-
-                // search for storage under the zone, pod, cluster of the host.
-                DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
-                        host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
-
-                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile,
-                        lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
-                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                List<Volume> readyAndReusedVolumes = result.second();
-
-                // choose the potential pool for this VM for this host
-                if (!suitableVolumeStoragePools.isEmpty()) {
-                    List<Host> suitableHosts = new ArrayList<Host>();
-                    suitableHosts.add(host);
-
-                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
-                            suitableHosts, suitableVolumeStoragePools);
-                    if (potentialResources != null) {
-                        Pod pod = _podDao.findById(host.getPodId());
-                        Cluster cluster = _clusterDao.findById(host.getClusterId());
-                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                        // remove the reused vol<->pool from destination, since
-                        // we don't have to prepare this volume.
-                        for (Volume vol : readyAndReusedVolumes) {
-                            storageVolMap.remove(vol);
-                        }
-                        DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
-                        s_logger.debug("Returning Deployment Destination: " + dest);
-                        return dest;
-                    }
-                }
-            }
-            s_logger.debug("Cannnot deploy to specified host, returning.");
-            return null;
-        }
-
-        if (vm.getLastHostId() != null && haVmTag == null) {
-            s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());
-
-            HostVO host = _hostDao.findById(vm.getLastHostId());
-            if(host == null){
-                s_logger.debug("The last host of this VM cannot be found");
-            }else if(avoid.shouldAvoid(host)){
-                s_logger.debug("The last host of this VM is in avoid set");
-            }else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
-                s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
-            }else{
-                if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
-                    long cluster_id = host.getClusterId();
-                    ClusterDetailsVO cluster_detail_cpu =  _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
-                    ClusterDetailsVO cluster_detail_ram =  _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
-                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
-                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
-                    if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){
-                        s_logger.debug("The last host of this VM is UP and has enough capacity");
-                        s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
-                        //search for storage under the zone, pod, cluster of the last host.
-                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
-                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
-                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                        List<Volume> readyAndReusedVolumes = result.second();
-                        //choose the potential pool for this VM for this host
-                        if(!suitableVolumeStoragePools.isEmpty()){
-                            List<Host> suitableHosts = new ArrayList<Host>();
-                            suitableHosts.add(host);
-
-                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
-                            if(potentialResources != null){
-                                Pod pod = _podDao.findById(host.getPodId());
-                                Cluster cluster = _clusterDao.findById(host.getClusterId());
-                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                                // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
-                                for(Volume vol : readyAndReusedVolumes){
-                                    storageVolMap.remove(vol);
-                                }
-                                DeployDestination dest =  new DeployDestination(dc, pod, cluster, host, storageVolMap);
-                                s_logger.debug("Returning Deployment Destination: "+ dest);
-                                return dest;
-                            }
-                        }
-                    }else{
-                        s_logger.debug("The last host of this VM does not have enough capacity");
-                    }
-                }else{
-                    s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "+host.getStatus().name() + ", host resource state is: "+host.getResourceState());
-                }
-            }
-            s_logger.debug("Cannot choose the last host to deploy this VM ");
-        }
-
-
         List<Long> clusterList = new ArrayList<Long>();
         if (plan.getClusterId() != null) {
             Long clusterIdSpecified = plan.getClusterId();
             s_logger.debug("Searching resources only under specified Cluster: "+ clusterIdSpecified);
             ClusterVO cluster = _clusterDao.findById(plan.getClusterId());
             if (cluster != null ){
-                clusterList.add(clusterIdSpecified);
-                return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
+                if (avoid.shouldAvoid(cluster)) {
+                    s_logger.debug("The specified cluster is in avoid set, returning.");
+                } else {
+                    clusterList.add(clusterIdSpecified);
+                    removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
+                }
+                return clusterList;
             }else{
                 s_logger.debug("The specified cluster cannot be found, returning.");
                 avoid.addCluster(plan.getClusterId());
@@ -280,11 +149,15 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
             HostPodVO pod = _podDao.findById(podIdSpecified);
             if (pod != null) {
-                DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
-                if(dest == null){
-                    avoid.addPod(plan.getPodId());
+                if (avoid.shouldAvoid(pod)) {
+                    s_logger.debug("The specified pod is in avoid set, returning.");
+                } else {
+                    clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
+                    if (clusterList == null) {
+                        avoid.addPod(plan.getPodId());
+                    }
                 }
-                return dest;
+                return clusterList;
             } else {
                 s_logger.debug("The specified Pod cannot be found, returning.");
                 avoid.addPod(plan.getPodId());
@@ -305,7 +178,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
     }
 
-    private DeployDestination scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
+    private List<Long> scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
 
         ServiceOffering offering = vmProfile.getServiceOffering();
         int requiredCpu = offering.getCpu() * offering.getSpeed();
@@ -341,20 +214,24 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         if(!podsWithCapacity.isEmpty()){
 
             prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan);
+            if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("No Pods found for destination, returning.");
+                }
+                return null;
+            }
 
+            List<Long> clusterList = new ArrayList<Long>();
             //loop over pods
             for(Long podId : prioritizedPodIds){
                 s_logger.debug("Checking resources under Pod: "+podId);
-                DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid);
-                if(dest != null){
-                    return dest;
+                List<Long> clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan,
+                        avoid);
+                if (clustersUnderPod != null) {
+                    clusterList.addAll(clustersUnderPod);
                 }
-                avoid.addPod(podId);
-            }
-            if (s_logger.isDebugEnabled()) {
-                s_logger.debug("No Pods found for destination, returning.");
             }
-            return null;
+            return clusterList;
         }else{
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning.");
@@ -363,7 +240,69 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         }
     }
 
-    private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
+    private Map<Short, Float> getCapacityThresholdMap() {
+        // Lets build this real time so that the admin wont have to restart MS
+        // if he changes these values
+        Map<Short, Float> disableThresholdMap = new HashMap<Short, Float>();
+
+        String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key());
+        float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F);
+        disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold);
+
+        String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key());
+        float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F);
+        disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold);
+
+        return disableThresholdMap;
+    }
+
+    private List<Short> getCapacitiesForCheckingThreshold() {
+        List<Short> capacityList = new ArrayList<Short>();
+        capacityList.add(Capacity.CAPACITY_TYPE_CPU);
+        capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
+        return capacityList;
+    }
+
+    private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid,
+            VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan) {
+
+        List<Short> capacityList = getCapacitiesForCheckingThreshold();
+        List<Long> clustersCrossingThreshold = new ArrayList<Long>();
+
+        ServiceOffering offering = vmProfile.getServiceOffering();
+        int cpu_requested = offering.getCpu() * offering.getSpeed();
+        long ram_requested = offering.getRamSize() * 1024L * 1024L;
+
+        // For each capacity get the cluster list crossing the threshold and
+        // remove it from the clusterList that will be used for vm allocation.
+        for (short capacity : capacityList) {
+
+            if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0) {
+                return;
+            }
+            if (capacity == Capacity.CAPACITY_TYPE_CPU) {
+                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
+                        plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested);
+            } else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) {
+                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
+                        plan.getDataCenterId(), Config.MemoryCapacityDisableThreshold.key(), ram_requested);
+            }
+
+            if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0) {
+                // addToAvoid Set
+                avoid.addClusterList(clustersCrossingThreshold);
+                // Remove clusters crossing disabled threshold
+                clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
+
+                s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
+                        " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters");
+            }
+
+        }
+    }
+
+    private List<Long> scanClustersForDestinationInZoneOrPod(long id, boolean isZone,
+            VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid) {
 
         VirtualMachine vm = vmProfile.getVirtualMachine();
         ServiceOffering offering = vmProfile.getServiceOffering();
@@ -396,6 +335,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
                     prioritizedClusterIds.removeAll(disabledClusters);
                 }
             }
+
+            removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan);
+
         }else{
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No clusters found having a host with enough capacity, returning.");
@@ -404,7 +346,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         }
         if(!prioritizedClusterIds.isEmpty()){
             List<Long> clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan);
-            return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
+            return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
         }else{
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning.");
@@ -452,114 +394,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         return disabledPods;
     }
 
-    private List<Short> getCapacitiesForCheckingThreshold(){
-        List<Short> capacityList = new ArrayList<Short>();
-        capacityList.add(Capacity.CAPACITY_TYPE_CPU);
-        capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
-        return capacityList;
-    }
-
-    private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
-
-        List<Short> capacityList = getCapacitiesForCheckingThreshold();
-        List<Long> clustersCrossingThreshold = new ArrayList<Long>();
-
-        ServiceOffering offering = vmProfile.getServiceOffering();
-        int cpu_requested = offering.getCpu() * offering.getSpeed();
-        long ram_requested = offering.getRamSize() * 1024L * 1024L;
-
-        // 	For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation.
-        for(short capacity : capacityList){
-
-        	if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){
-           		return;
-           	}
-            if (capacity == Capacity.CAPACITY_TYPE_CPU) {
-                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested);
-            }
-            else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) {
-                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
-                        Config.MemoryCapacityDisableThreshold.key(), ram_requested );
-            }
-
-
-           	if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){
-               	// addToAvoid Set
-           		avoid.addClusterList(clustersCrossingThreshold);
-           		// Remove clusters crossing disabled threshold
-               	clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
-
-                   s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
-                           " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters");
-           	}
-
-        }
-    }
-
-    private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
-            DeploymentPlan plan, ExcludeList avoid, DataCenter dc){
-
-        if (s_logger.isTraceEnabled()) {
-            s_logger.trace("ClusterId List to consider: " + clusterList);
-        }
-
-        removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
-
-        for(Long clusterId : clusterList){
-            Cluster clusterVO = _clusterDao.findById(clusterId);
-
-            if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
-                s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster");
-                avoid.addCluster(clusterVO.getId());
-                continue;
-            }
-
-            s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId());
-            //search for resources(hosts and storage) under this zone, pod, cluster.
-            DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
-
-            //find suitable hosts under this cluster, need as many hosts as we get.
-            List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
-            //if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM
-            if(suitableHosts != null && !suitableHosts.isEmpty()){
-                if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
-                    Pod pod = _podDao.findById(clusterVO.getPodId());
-                    DeployDestination dest =  new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
-                    return dest;
-                }
-
-                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
-                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                List<Volume> readyAndReusedVolumes = result.second();
-
-                //choose the potential host and pool for the VM
-                if(!suitableVolumeStoragePools.isEmpty()){
-                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
-
-                    if(potentialResources != null){
-                        Pod pod = _podDao.findById(clusterVO.getPodId());
-                        Host host = _hostDao.findById(potentialResources.first().getId());
-                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                        // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
-                        for(Volume vol : readyAndReusedVolumes){
-                            storageVolMap.remove(vol);
-                        }
-                        DeployDestination dest =  new DeployDestination(dc, pod, clusterVO, host, storageVolMap );
-                        s_logger.debug("Returning Deployment Destination: "+ dest);
-                        return dest;
-                    }
-                }else{
-                    s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId);
-                }
-            }else{
-                s_logger.debug("No suitable hosts found under this Cluster: "+clusterId);
-            }
-            avoid.addCluster(clusterVO.getId());
-        }
-        s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. ");
-        return null;
-    }
-
 
     protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){
         //look at the aggregate available cpu and ram per cluster
@@ -630,215 +464,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
     }
 
-
-    protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools){
-        s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
-
-        boolean hostCanAccessPool = false;
-        boolean haveEnoughSpace = false;
-        Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
-        TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
-            @Override
-            public int compare(Volume v1, Volume v2) {
-                if(v1.getSize() < v2.getSize())
-                    return 1;
-                else
-                    return -1;
-            }
-        });
-        volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
-        boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
-        for(Host potentialHost : suitableHosts){
-            Map<StoragePool,List<Volume>> volumeAllocationMap = new HashMap<StoragePool,List<Volume>>();
-            for(Volume vol : volumesOrderBySizeDesc){
-                haveEnoughSpace = false;
-                s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType());
-                List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
-                hostCanAccessPool = false;
-                for(StoragePool potentialSPool : volumePoolList){
-                    if(hostCanAccessSPool(potentialHost, potentialSPool)){
-                        hostCanAccessPool = true;
-                        if(multipleVolume){
-                            List<Volume> requestVolumes  = null;
-                            if(volumeAllocationMap.containsKey(potentialSPool))
-                                requestVolumes = volumeAllocationMap.get(potentialSPool);
-                            else
-                                requestVolumes = new ArrayList<Volume>();
-                            requestVolumes.add(vol);
-
-                            if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
-                                continue;
-                            volumeAllocationMap.put(potentialSPool,requestVolumes);
-                        }
-                        storage.put(vol, potentialSPool);
-                        haveEnoughSpace = true;
-                        break;
-                    }
-                }
-                if(!hostCanAccessPool){
-                    break;
-                }
-                if(!haveEnoughSpace) {
-                    s_logger.warn("insufficient capacity to allocate all volumes");
-                    break;
-                }
-            }
-            if(hostCanAccessPool && haveEnoughSpace){
-                s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM");
-                return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
-            }
-        }
-        s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
-        return null;
-    }
-
-    protected boolean hostCanAccessSPool(Host host, StoragePool pool){
-        boolean hostCanAccessSPool = false;
-
-        StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId());
-        if(hostPoolLinkage != null){
-            hostCanAccessSPool = true;
-        }
-
-        s_logger.debug("Host: "+ host.getId() + (hostCanAccessSPool ?" can" : " cannot") + " access pool: "+ pool.getId());
-        return hostCanAccessSPool;
-    }
-
-    protected List<Host> findSuitableHosts(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
-        List<Host> suitableHosts = new ArrayList<Host>();
-        for(HostAllocator allocator : _hostAllocators) {
-            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
-            if (suitableHosts != null && !suitableHosts.isEmpty()) {
-                break;
-            }
-        }
-
-        if(suitableHosts.isEmpty()){
-            s_logger.debug("No suitable hosts found");
-        }
-        return suitableHosts;
-    }
-
-    protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
-        List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
-        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
-        List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();
-
-        //for each volume find list of suitable storage pools by calling the allocators
-        for (VolumeVO toBeCreated : volumesTobeCreated) {
-            s_logger.debug("Checking suitable pools for volume (Id, Type): ("+toBeCreated.getId() +"," +toBeCreated.getVolumeType().name() + ")");
-
-            //If the plan specifies a poolId, it means that this VM's ROOT volume is ready and the pool should be reused.
-            //In this case, also check if rest of the volumes are ready and can be reused.
-            if(plan.getPoolId() != null){
-                s_logger.debug("Volume has pool(" + plan.getPoolId() + ") already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId());
-                List<StoragePool> suitablePools = new ArrayList<StoragePool>();
-                StoragePool pool = null;
-                if(toBeCreated.getPoolId() != null){
-                    s_logger.debug("finding pool by id '" + toBeCreated.getPoolId() + "'");
-                    pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
-                }else{
-                    s_logger.debug("finding pool by id '" + plan.getPoolId() + "'");
-                    pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
-                }
-
-                if(pool != null){
-                    if(!pool.isInMaintenance()){
-                        if(!avoid.shouldAvoid(pool)){
-                            long exstPoolDcId = pool.getDataCenterId();
-
-                            long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
-                            long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
-                            if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){
-                                s_logger.debug("Planner need not allocate a pool for this volume since its READY");
-                                suitablePools.add(pool);
-                                suitableVolumeStoragePools.put(toBeCreated, suitablePools);
-                                if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
-                                    readyAndReusedVolumes.add(toBeCreated);
-                                }
-                                continue;
-                            }else{
-                                s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
-                            }
-                        }else{
-                            s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
-                        }
-                    }else{
-                        s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
-                    }
-                }else{
-                    s_logger.debug("Unable to find pool by provided id");
-                }
-            }
-
-            if(s_logger.isDebugEnabled()){
-                s_logger.debug("We need to allocate new storagepool for this volume");
-            }
-            if(!isRootAdmin(plan.getReservationContext())){
-                if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){
-                    if(s_logger.isDebugEnabled()){
-                        s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
-                        s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
-                    }
-                    //Cannot find suitable storage pools under this cluster for this volume since allocation_state is disabled.
-                    //- remove any suitable pools found for other volumes.
-                    //All volumes should get suitable pools under this cluster; else we cant use this cluster.
-                    suitableVolumeStoragePools.clear();
-                    break;
-                }
-            }
-
-            s_logger.debug("Calling StoragePoolAllocators to find suitable pools");
-
-            DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
-            DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());
-
-            boolean useLocalStorage = false;
-            if (vmProfile.getType() != VirtualMachine.Type.User) {
-                String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key());
-                if (ssvmUseLocalStorage.equalsIgnoreCase("true")) {
-                    useLocalStorage = true;
-                }
-            } else {
-                useLocalStorage = diskOffering.getUseLocalStorage();
-
-                // TODO: this is a hacking fix for the problem of deploy ISO-based VM on local storage
-                // when deploying VM based on ISO, we have a service offering and an additional disk offering, use-local storage flag is actually
-                // saved in service offering, overrde the flag from service offering when it is a ROOT disk
-                if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
-                    if(toBeCreated.getVolumeType() == Volume.Type.ROOT)
-                        useLocalStorage = true;
-                }
-            }
-            diskProfile.setUseLocalStorage(useLocalStorage);
-
-            boolean foundPotentialPools = false;
-            for(StoragePoolAllocator allocator : _storagePoolAllocators) {
-                final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
-                if (suitablePools != null && !suitablePools.isEmpty()) {
-                    suitableVolumeStoragePools.put(toBeCreated, suitablePools);
-                    foundPotentialPools = true;
-                    break;
-                }
-            }
-
-            if(!foundPotentialPools){
-                s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId());
-                //No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes.
-                //All volumes should get suitable pools under this cluster; else we cant use this cluster.
-                suitableVolumeStoragePools.clear();
-                break;
-            }
-        }
-
-        if(suitableVolumeStoragePools.isEmpty()){
-            s_logger.debug("No suitable pools found");
-        }
-
-        return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
-    }
-
-
     private boolean isRootAdmin(ReservationContext reservationContext) {
         if(reservationContext != null){
             if(reservationContext.getAccount() != null){
@@ -859,10 +484,17 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
     @Override
     public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
-        if(vm.getHypervisorType() != HypervisorType.BareMetal){
-            //check the allocation strategy
-            if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) {
-                return true;
+        // check what the ServiceOffering says. If null, check the global config
+        ServiceOffering offering = vm.getServiceOffering();
+        if (vm.getHypervisorType() != HypervisorType.BareMetal) {
+            if (offering != null && offering.getDeploymentPlanner() != null) {
+                if (offering.getDeploymentPlanner().equals(this.getName())) {
+                    return true;
+                }
+            } else {
+                if (_globalDeploymentPlanner != null && _globalDeploymentPlanner.equals(this._name)) {
+                    return true;
+                }
             }
         }
         return false;
@@ -872,29 +504,20 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
         super.configure(name, params);
         _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
+        _globalDeploymentPlanner = _configDao.getValue(Config.VmDeploymentPlanner.key());
         return true;
     }
 
-    private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){
-        // Check if the zone exists in the system
-        DataCenterVO zone = _dcDao.findById(zoneId);
-        if(zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()){
-            s_logger.info("Zone is currently disabled, cannot allocate to this zone: "+ zoneId);
-            return false;
-        }
-
-        Pod pod = _podDao.findById(podId);
-        if(pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()){
-            s_logger.info("Pod is currently disabled, cannot allocate to this pod: "+ podId);
-            return false;
-        }
 
-        Cluster cluster = _clusterDao.findById(clusterId);
-        if(cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()){
-            s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: "+ clusterId);
-            return false;
-        }
+    @Override
+    public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan,
+            ExcludeList avoid) throws InsufficientServerCapacityException {
+        // TODO Auto-generated method stub
+        return null;
+    }
 
-        return true;
+    @Override
+    public PlannerResourceUsage getResourceUsage() {
+        return PlannerResourceUsage.Shared;
     }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java
deleted file mode 100755
index ce49405..0000000
--- a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.deploy;
-
-import javax.ejb.Local;
-
-import org.apache.log4j.Logger;
-
-import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.vm.UserVmVO;
-
-@Local(value = {DeployPlannerSelector.class})
-public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector {
-    private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class);
-
-    @Override
-    public String selectPlanner(UserVmVO vm) {
-        if (vm.getHypervisorType() != HypervisorType.BareMetal) {
-            //check the allocation strategy
-            if (_allocationAlgorithm != null) {
-                if (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString())
-                        || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString())) {
-                    return "FirstFitPlanner";
-                } else if (_allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
-                    return "UserDispersingPlanner";
-                } else if (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString())
-                        || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) {
-                    return "UserConcentratedPodPlanner";
-                }
-            } else {
-                if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("The allocation algorithm is null, cannot select the planner");
-                }
-            }
-        }
-
-        return null;
-    }
-}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/deploy/PlannerHostReservationVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/PlannerHostReservationVO.java b/server/src/com/cloud/deploy/PlannerHostReservationVO.java
new file mode 100644
index 0000000..cf5f031
--- /dev/null
+++ b/server/src/com/cloud/deploy/PlannerHostReservationVO.java
@@ -0,0 +1,117 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.deploy;
+
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.EnumType;
+import javax.persistence.Enumerated;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import org.apache.cloudstack.api.InternalIdentity;
+
+import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
+
+@Entity
+@Table(name = "op_host_planner_reservation")
+public class PlannerHostReservationVO implements InternalIdentity {
+    @Id
+    @GeneratedValue(strategy=GenerationType.IDENTITY)
+    @Column(name="id")
+    private long id;
+
+    @Column(name="host_id")
+    private Long hostId;
+
+    @Column(name="data_center_id")
+    private Long dataCenterId;
+
+    @Column(name="pod_id")
+    private Long podId;
+
+    @Column(name="cluster_id")
+    private Long clusterId;
+
+    @Column(name = "resource_usage")
+    @Enumerated(EnumType.STRING)
+    private PlannerResourceUsage resourceUsage;
+
+    public PlannerHostReservationVO() {
+    }
+
+    public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId) {
+        this.hostId = hostId;
+        this.dataCenterId = dataCenterId;
+        this.podId = podId;
+        this.clusterId = clusterId;
+    }
+
+    public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId,
+            PlannerResourceUsage resourceUsage) {
+        this.hostId = hostId;
+        this.dataCenterId = dataCenterId;
+        this.podId = podId;
+        this.clusterId = clusterId;
+        this.resourceUsage = resourceUsage;
+    }
+
+    @Override
+    public long getId() {
+        return id;
+    }
+
+    public Long getHostId() {
+        return hostId;
+    }
+
+    public void setHostId(Long hostId) {
+        this.hostId = hostId;
+    }
+
+    public Long getDataCenterId() {
+        return dataCenterId;
+    }
+    public void setDataCenterId(Long dataCenterId) {
+        this.dataCenterId = dataCenterId;
+    }
+
+    public Long getPodId() {
+        return podId;
+    }
+    public void setPodId(long podId) {
+        this.podId = new Long(podId);
+    }
+
+    public Long getClusterId() {
+        return clusterId;
+    }
+    public void setClusterId(long clusterId) {
+        this.clusterId = new Long(clusterId);
+    }
+
+    public PlannerResourceUsage getResourceUsage() {
+        return resourceUsage;
+    }
+
+    public void setResourceUsage(PlannerResourceUsage resourceType) {
+        this.resourceUsage = resourceType;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java
new file mode 100644
index 0000000..69118f1
--- /dev/null
+++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.deploy.dao;
+
+import java.util.List;
+
+import com.cloud.deploy.PlannerHostReservationVO;
+import com.cloud.utils.db.GenericDao;
+
+public interface PlannerHostReservationDao extends GenericDao<PlannerHostReservationVO, Long> {
+
+    PlannerHostReservationVO findByHostId(long hostId);
+
+    List<PlannerHostReservationVO> listAllReservedHosts();
+
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java
new file mode 100644
index 0000000..41e0964
--- /dev/null
+++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.deploy.dao;
+
+import java.util.List;
+
+import javax.annotation.PostConstruct;
+import javax.ejb.Local;
+import com.cloud.deploy.PlannerHostReservationVO;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+
+@Local(value = { PlannerHostReservationDao.class })
+public class PlannerHostReservationDaoImpl extends GenericDaoBase<PlannerHostReservationVO, Long> implements
+        PlannerHostReservationDao {
+
+    private SearchBuilder<PlannerHostReservationVO> _hostIdSearch;
+    private SearchBuilder<PlannerHostReservationVO> _reservedHostSearch;
+
+    public PlannerHostReservationDaoImpl() {
+
+    }
+
+    @PostConstruct
+    protected void init() {
+        _hostIdSearch = createSearchBuilder();
+        _hostIdSearch.and("hostId", _hostIdSearch.entity().getHostId(), SearchCriteria.Op.EQ);
+        _hostIdSearch.done();
+
+        _reservedHostSearch = createSearchBuilder();
+        _reservedHostSearch.and("usage", _reservedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.NNULL);
+        _reservedHostSearch.done();
+    }
+
+    @Override
+    public PlannerHostReservationVO findByHostId(long hostId) {
+        SearchCriteria<PlannerHostReservationVO> sc = _hostIdSearch.create();
+        sc.setParameters("hostId", hostId);
+        return findOneBy(sc);
+    }
+
+    @Override
+    public List<PlannerHostReservationVO> listAllReservedHosts() {
+        SearchCriteria<PlannerHostReservationVO> sc = _reservedHostSearch.create();
+        return listBy(sc);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/resource/ResourceManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java
index 0ab35dd..c60f095 100755
--- a/server/src/com/cloud/resource/ResourceManagerImpl.java
+++ b/server/src/com/cloud/resource/ResourceManagerImpl.java
@@ -85,6 +85,10 @@ import com.cloud.dc.dao.ClusterVSMMapDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.DataCenterIpAddressDao;
 import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.PlannerHostReservationVO;
+import com.cloud.deploy.dao.PlannerHostReservationDao;
+import com.cloud.event.ActionEvent;
+import com.cloud.event.EventTypes;
 import com.cloud.exception.AgentUnavailableException;
 import com.cloud.exception.DiscoveryException;
 import com.cloud.exception.InvalidParameterValueException;
@@ -212,6 +216,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
     protected HighAvailabilityManager        _haMgr;
     @Inject
     protected StorageService                 _storageSvr;
+    @Inject
+    PlannerHostReservationDao _plannerHostReserveDao;
 
     protected List<? extends Discoverer> _discoverers;
     public List<? extends Discoverer> getDiscoverers() {
@@ -2851,4 +2857,41 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
 				ResourceState.Enabled);
         return sc.list();
 	}
+
+    @Override
+    @DB
+    @ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true)
+    public boolean releaseHostReservation(Long hostId) {
+        Transaction txn = Transaction.currentTxn();
+        try {
+            txn.start();
+            PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId);
+            if (reservationEntry != null) {
+                long id = reservationEntry.getId();
+                PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true);
+                if (hostReservation == null) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Host reservation for host: " + hostId + " does not even exist.  Release reservation call is ignored.");
+                    }
+                    txn.rollback();
+                    return false;
+                }
+                hostReservation.setResourceUsage(null);
+                _plannerHostReserveDao.persist(hostReservation);
+                txn.commit();
+                return true;
+            }
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Host reservation for host: " + hostId
+                        + " does not even exist.  Release reservation call is ignored.");
+            }
+            return false;
+        } catch (CloudRuntimeException e) {
+            throw e;
+        } catch (Throwable t) {
+            s_logger.error("Unable to release host reservation for host: " + hostId, t);
+            txn.rollback();
+            return false;
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/server/ManagementServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java
index bc37282..f74b7ad 100755
--- a/server/src/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/com/cloud/server/ManagementServerImpl.java
@@ -78,6 +78,7 @@ import org.apache.cloudstack.api.command.admin.host.FindHostsForMigrationCmd;
 import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
 import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd;
 import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
+import org.apache.cloudstack.api.command.admin.host.ReleaseHostReservationCmd;
 import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
 import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
 import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd;
@@ -462,6 +463,7 @@ import com.cloud.dc.dao.HostPodDao;
 import com.cloud.dc.dao.PodVlanMapDao;
 import com.cloud.dc.dao.VlanDao;
 import com.cloud.deploy.DataCenterDeployment;
+import com.cloud.deploy.DeploymentPlanner;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.domain.DomainVO;
 import com.cloud.domain.dao.DomainDao;
@@ -589,6 +591,7 @@ import com.cloud.vm.dao.VMInstanceDao;
 
 import edu.emory.mathcs.backport.java.util.Arrays;
 import edu.emory.mathcs.backport.java.util.Collections;
+import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd;
 
 
 public class ManagementServerImpl extends ManagerBase implements ManagementServer {
@@ -726,11 +729,21 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
     private List<UserAuthenticator> _userAuthenticators;
     private List<UserAuthenticator> _userPasswordEncoders;
 
+    protected List<DeploymentPlanner> _planners;
+
+    public List<DeploymentPlanner> getPlanners() {
+        return _planners;
+    }
+
+    public void setPlanners(List<DeploymentPlanner> _planners) {
+        this._planners = _planners;
+    }
+
     @Inject ClusterManager _clusterMgr;
     private String _hashKey = null;
     private String _encryptionKey = null;
     private String _encryptionIV = null;
-    
+
     @Inject
     protected AffinityGroupVMMapDao _affinityGroupVMMapDao;
 
@@ -976,29 +989,29 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         String zoneType = cmd.getZoneType();
         String keyword = cmd.getKeyword();
         zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId);
-    	
-        
+
+
     	Filter searchFilter = new Filter(ClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
-        
-        SearchBuilder<ClusterVO> sb = _clusterDao.createSearchBuilder();        
-        sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);        
-        sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);  
-        sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ);          
-        sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);         
+
+        SearchBuilder<ClusterVO> sb = _clusterDao.createSearchBuilder();
+        sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
+        sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
+        sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ);
+        sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
         sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ);
         sb.and("clusterType", sb.entity().getClusterType(), SearchCriteria.Op.EQ);
         sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ);
-        
+
         if(zoneType != null) {
             SearchBuilder<DataCenterVO> zoneSb = _dcDao.createSearchBuilder();
-            zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);    
+            zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
             sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER);
         }
-        
-        
-        SearchCriteria<ClusterVO> sc = sb.create();        
+
+
+        SearchCriteria<ClusterVO> sc = sb.create();
         if (id != null) {
-            sc.setParameters("id", id);            
+            sc.setParameters("id", id);
         }
 
         if (name != null) {
@@ -1026,9 +1039,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         }
 
         if(zoneType != null) {
-            sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);          
+            sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
         }
-                
+
         if (keyword != null) {
             SearchCriteria<ClusterVO> ssc = _clusterDao.createSearchCriteria();
             ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%");
@@ -1441,26 +1454,26 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
     public Pair<List<? extends Pod>, Integer> searchForPods(ListPodsByCmd cmd) {
         String podName = cmd.getPodName();
         Long id = cmd.getId();
-        Long zoneId = cmd.getZoneId();        
+        Long zoneId = cmd.getZoneId();
         Object keyword = cmd.getKeyword();
         Object allocationState = cmd.getAllocationState();
         String zoneType = cmd.getZoneType();
         zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId);
 
-    	
+
     	Filter searchFilter = new Filter(HostPodVO.class, "dataCenterId", true, cmd.getStartIndex(), cmd.getPageSizeVal());
-        SearchBuilder<HostPodVO> sb = _hostPodDao.createSearchBuilder();        
+        SearchBuilder<HostPodVO> sb = _hostPodDao.createSearchBuilder();
         sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
-        sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);          
-        sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);         
+        sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
+        sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
         sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ);
-        
+
         if(zoneType != null) {
             SearchBuilder<DataCenterVO> zoneSb = _dcDao.createSearchBuilder();
-            zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);    
+            zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
             sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER);
         }
-               
+
         SearchCriteria<HostPodVO> sc = sb.create();
         if (keyword != null) {
             SearchCriteria<HostPodVO> ssc = _hostPodDao.createSearchCriteria();
@@ -1473,23 +1486,23 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         if (id != null) {
             sc.setParameters("id", id);
         }
-        
+
         if (podName != null) {
             sc.setParameters("name", "%" + podName + "%");
         }
-        
+
         if (zoneId != null) {
             sc.setParameters("dataCenterId", zoneId);
         }
-        
+
         if (allocationState != null) {
             sc.setParameters("allocationState", allocationState);
-        }        
-    
+        }
+
         if(zoneType != null) {
-            sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);          
+            sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
         }
-        
+
         Pair<List<HostPodVO>, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter);
         return new Pair<List<? extends Pod>, Integer>(result.first(), result.second());
     }
@@ -2903,7 +2916,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(ListAffinityGroupsCmd.class);
         cmdList.add(UpdateVMAffinityGroupCmd.class);
         cmdList.add(ListAffinityGroupTypesCmd.class);
-
+        cmdList.add(ListDeploymentPlannersCmd.class);
+        cmdList.add(ReleaseHostReservationCmd.class);
         cmdList.add(AddResourceDetailCmd.class);
         cmdList.add(RemoveResourceDetailCmd.class);
         cmdList.add(ListResourceDetailsCmd.class);
@@ -3105,10 +3119,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
 
         if(zoneType != null) {
             SearchBuilder<DataCenterVO> zoneSb = _dcDao.createSearchBuilder();
-            zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);    
+            zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
             sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER);
-        }        
-        
+        }
+
         SearchCriteria<VMInstanceVO> sc = sb.create();
 
         if (keyword != null) {
@@ -3150,9 +3164,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         }
 
         if(zoneType != null) {
-            sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);          
+            sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
         }
-        
+
         Pair<List<VMInstanceVO>, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter);
         return new Pair<List<? extends VirtualMachine>, Integer>(result.first(), result.second());
     }
@@ -3677,7 +3691,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         // although we may have race conditioning here, database transaction serialization should
         // give us the same key
         if (_hashKey == null) {
-            _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(), 
+            _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(),
             	getBase64EncodedRandomKey(128));
         }
         return _hashKey;
@@ -3686,41 +3700,41 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
     @Override
     public String getEncryptionKey() {
         if (_encryptionKey == null) {
-            _encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(), 
-            	Config.EncryptionKey.getCategory(), 
+            _encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(),
+            	Config.EncryptionKey.getCategory(),
             	getBase64EncodedRandomKey(128));
         }
         return _encryptionKey;
     }
-    
+
     @Override
     public String getEncryptionIV() {
         if (_encryptionIV == null) {
-            _encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(), 
-            	Config.EncryptionIV.getCategory(), 
+            _encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(),
+            	Config.EncryptionIV.getCategory(),
             	getBase64EncodedRandomKey(128));
         }
         return _encryptionIV;
     }
-    
+
     @Override
     @DB
     public void resetEncryptionKeyIV() {
-    	
+
     	SearchBuilder<ConfigurationVO> sb = _configDao.createSearchBuilder();
     	sb.and("name1", sb.entity().getName(), SearchCriteria.Op.EQ);
     	sb.or("name2", sb.entity().getName(), SearchCriteria.Op.EQ);
     	sb.done();
-    	
+
     	SearchCriteria<ConfigurationVO> sc = sb.create();
     	sc.setParameters("name1", Config.EncryptionKey.key());
     	sc.setParameters("name2", Config.EncryptionIV.key());
-    	
+
     	_configDao.expunge(sc);
     	_encryptionKey = null;
     	_encryptionIV = null;
     }
-    
+
     private static String getBase64EncodedRandomKey(int nBits) {
 		SecureRandom random;
 		try {
@@ -4056,4 +4070,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         }
 
     }
+
+    @Override
+    public List<String> listDeploymentPlanners() {
+        List<String> plannersAvailable = new ArrayList<String>();
+        for (DeploymentPlanner planner : _planners) {
+            plannersAvailable.add(planner.getName());
+        }
+
+        return plannersAvailable;
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/src/com/cloud/vm/UserVmManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index aa06529..a3b731a 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -98,7 +98,6 @@ import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.HostPodDao;
 import com.cloud.deploy.DataCenterDeployment;
 import com.cloud.deploy.DeployDestination;
-import com.cloud.deploy.DeployPlannerSelector;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.domain.DomainVO;
 import com.cloud.domain.dao.DomainDao;
@@ -402,9 +401,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
     @Inject
     AffinityGroupDao _affinityGroupDao;
 
-    @Inject
-    List<DeployPlannerSelector> plannerSelectors;
-
     protected ScheduledExecutorService _executor = null;
     protected int _expungeInterval;
     protected int _expungeDelay;
@@ -2836,7 +2832,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
         }
         return result;
     }
-    
+
     @Override
     public boolean finalizeDeployment(Commands cmds,
             VirtualMachineProfile<UserVmVO> profile, DeployDestination dest,
@@ -3036,7 +3032,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
                                 + " stop due to exception ", ex);
             }
         }
-        
+
         VMInstanceVO vm = profile.getVirtualMachine();
         List<NicVO> nics = _nicDao.listByVmId(vm.getId());
         for (NicVO nic : nics) {
@@ -3174,15 +3170,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
 
         VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid());
 
-        String plannerName = null;
-        for (DeployPlannerSelector dps : plannerSelectors) {
-            plannerName = dps.selectPlanner(vm);
-            if (plannerName != null) {
-                break;
-            }
-        }
+        // Get serviceOffering for Virtual Machine
+        ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId());
+        String plannerName = offering.getDeploymentPlanner();
         if (plannerName == null) {
-            throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType()));
+            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
+                plannerName = "BareMetalPlanner";
+            } else {
+                plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
+            }
         }
 
         String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString());
@@ -3826,7 +3822,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
                     + cmd.getAccountName() + " is disabled.");
         }
 
-        //check caller has access to both the old and new account 
+        //check caller has access to both the old and new account
         _accountMgr.checkAccess(caller, null, true, oldAccount);
         _accountMgr.checkAccess(caller, null, true, newAccount);
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/test/com/cloud/resource/MockResourceManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java
index 5202c31..1fff3a6 100644
--- a/server/test/com/cloud/resource/MockResourceManagerImpl.java
+++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java
@@ -608,4 +608,10 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
 		return null;
 	}
 
+    @Override
+    public boolean releaseHostReservation(Long hostId) {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java
new file mode 100644
index 0000000..e3b7d31
--- /dev/null
+++ b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java
@@ -0,0 +1,359 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.vm;
+
+import static org.junit.Assert.*;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import com.cloud.service.ServiceOfferingVO;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.GuestOSCategoryDao;
+import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.capacity.CapacityManager;
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.agent.AgentManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.DataCenterDeployment;
+import com.cloud.deploy.DeployDestination;
+import com.cloud.deploy.DeploymentClusterPlanner;
+import com.cloud.deploy.DeploymentPlanner;
+import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
+import com.cloud.deploy.DeploymentPlanningManagerImpl;
+import com.cloud.deploy.FirstFitPlanner;
+import com.cloud.deploy.PlannerHostReservationVO;
+import com.cloud.deploy.dao.PlannerHostReservationDao;
+import org.apache.cloudstack.affinity.AffinityGroupProcessor;
+import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
+import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.framework.messagebus.MessageBus;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.test.utils.SpringUtils;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.FilterType;
+import org.springframework.context.annotation.ComponentScan.Filter;
+import org.springframework.core.type.classreading.MetadataReader;
+import org.springframework.core.type.classreading.MetadataReaderFactory;
+import org.springframework.core.type.filter.TypeFilter;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+import org.springframework.test.context.support.AnnotationConfigContextLoader;
+
+import com.cloud.exception.AffinityConflictException;
+import com.cloud.exception.InsufficientServerCapacityException;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.user.AccountManager;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(loader = AnnotationConfigContextLoader.class)
+public class DeploymentPlanningManagerImplTest {
+
+    @Inject
+    DeploymentPlanningManagerImpl _dpm;
+
+    @Inject
+    PlannerHostReservationDao _plannerHostReserveDao;
+
+    @Inject VirtualMachineProfileImpl vmProfile;
+
+    @Inject
+    AffinityGroupVMMapDao _affinityGroupVMMapDao;
+
+    @Inject
+    ExcludeList avoids;
+
+    @Inject
+    DataCenterVO dc;
+
+    @Inject
+    DataCenterDao _dcDao;
+
+    @Inject
+    FirstFitPlanner _planner;
+
+    @Inject
+    ClusterDao _clusterDao;
+
+    private static long domainId = 5L;
+
+    private static long dataCenterId = 1L;
+
+
+    @BeforeClass
+    public static void setUp() throws ConfigurationException {
+    }
+
+    @Before
+    public void testSetUp() {
+        ComponentContext.initComponentsLifeCycle();
+
+        PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared);
+        Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO);
+        Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO);
+        Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L);
+
+        VMInstanceVO vm = new VMInstanceVO();
+        Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm);
+
+        Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc);
+        Mockito.when(dc.getId()).thenReturn(dataCenterId);
+
+        ClusterVO clusterVO = new ClusterVO();
+        clusterVO.setHypervisorType(HypervisorType.XenServer.toString());
+        Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO);
+
+        Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner");
+        List<DeploymentPlanner> planners = new ArrayList<DeploymentPlanner>();
+        planners.add(_planner);
+        _dpm.setPlanners(planners);
+
+    }
+
+    @Test
+    public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException {
+        ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
+                "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner");
+        Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);
+
+        DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
+
+        Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(true);
+        DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
+        assertNull("DataCenter is in avoid set, destination should be null! ", dest);
+    }
+
+    @Test
+    public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException {
+        ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
+                "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null,
+                "UserDispersingPlanner");
+        Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);
+
+        DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
+        Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false);
+
+        Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false);
+        DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
+        assertNull("Planner cannot handle, destination should be null! ", dest);
+    }
+
+    @Test
+    public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException {
+        ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
+                "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner");
+        Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);
+
+        DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
+        Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false);
+        Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true);
+
+        Mockito.when(((DeploymentClusterPlanner) _planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null);
+        DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
+        assertNull("Planner cannot handle, destination should be null! ", dest);
+    }
+
+
+    @Configuration
+    @ComponentScan(basePackageClasses = { DeploymentPlanningManagerImpl.class }, includeFilters = { @Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM) }, useDefaultFilters = false)
+    public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration {
+
+        @Bean
+        public FirstFitPlanner firstFitPlanner() {
+            return Mockito.mock(FirstFitPlanner.class);
+        }
+
+        @Bean
+        public DeploymentPlanner deploymentPlanner() {
+            return Mockito.mock(DeploymentPlanner.class);
+        }
+
+        @Bean
+        public DataCenterVO dataCenter() {
+            return Mockito.mock(DataCenterVO.class);
+        }
+
+        @Bean
+        public ExcludeList excludeList() {
+            return Mockito.mock(ExcludeList.class);
+        }
+
+        @Bean
+        public VirtualMachineProfileImpl virtualMachineProfileImpl() {
+            return Mockito.mock(VirtualMachineProfileImpl.class);
+        }
+
+        @Bean
+        public ClusterDetailsDao clusterDetailsDao() {
+            return Mockito.mock(ClusterDetailsDao.class);
+        }
+
+        @Bean
+        public DataStoreManager cataStoreManager() {
+            return Mockito.mock(DataStoreManager.class);
+        }
+
+        @Bean
+        public StorageManager storageManager() {
+            return Mockito.mock(StorageManager.class);
+        }
+
+        @Bean
+        public HostDao hostDao() {
+            return Mockito.mock(HostDao.class);
+        }
+
+        @Bean
+        public HostPodDao hostPodDao() {
+            return Mockito.mock(HostPodDao.class);
+        }
+
+        @Bean
+        public ClusterDao clusterDao() {
+            return Mockito.mock(ClusterDao.class);
+        }
+
+        @Bean
+        public GuestOSDao guestOSDao() {
+            return Mockito.mock(GuestOSDao.class);
+        }
+
+        @Bean
+        public GuestOSCategoryDao guestOSCategoryDao() {
+            return Mockito.mock(GuestOSCategoryDao.class);
+        }
+
+        @Bean
+        public CapacityManager capacityManager() {
+            return Mockito.mock(CapacityManager.class);
+        }
+
+        @Bean
+        public StoragePoolHostDao storagePoolHostDao() {
+            return Mockito.mock(StoragePoolHostDao.class);
+        }
+
+        @Bean
+        public VolumeDao volumeDao() {
+            return Mockito.mock(VolumeDao.class);
+        }
+
+        @Bean
+        public ConfigurationDao configurationDao() {
+            return Mockito.mock(ConfigurationDao.class);
+        }
+
+        @Bean
+        public DiskOfferingDao diskOfferingDao() {
+            return Mockito.mock(DiskOfferingDao.class);
+        }
+
+        @Bean
+        public PrimaryDataStoreDao primaryDataStoreDao() {
+            return Mockito.mock(PrimaryDataStoreDao.class);
+        }
+
+        @Bean
+        public CapacityDao capacityDao() {
+            return Mockito.mock(CapacityDao.class);
+        }
+
+        @Bean
+        public PlannerHostReservationDao plannerHostReservationDao() {
+            return Mockito.mock(PlannerHostReservationDao.class);
+        }
+
+        @Bean
+        public AffinityGroupProcessor affinityGroupProcessor() {
+            return Mockito.mock(AffinityGroupProcessor.class);
+        }
+
+        @Bean
+        public AffinityGroupDao affinityGroupDao() {
+            return Mockito.mock(AffinityGroupDao.class);
+        }
+
+        @Bean
+        public AffinityGroupVMMapDao affinityGroupVMMapDao() {
+            return Mockito.mock(AffinityGroupVMMapDao.class);
+        }
+
+        @Bean
+        public AccountManager accountManager() {
+            return Mockito.mock(AccountManager.class);
+        }
+
+        @Bean
+        public AgentManager agentManager() {
+            return Mockito.mock(AgentManager.class);
+        }
+
+        @Bean
+        public MessageBus messageBus() {
+            return Mockito.mock(MessageBus.class);
+        }
+
+
+        @Bean
+        public UserVmDao userVMDao() {
+            return Mockito.mock(UserVmDao.class);
+        }
+
+        @Bean
+        public VMInstanceDao vmInstanceDao() {
+            return Mockito.mock(VMInstanceDao.class);
+        }
+
+        @Bean
+        public DataCenterDao dataCenterDao() {
+            return Mockito.mock(DataCenterDao.class);
+        }
+
+        public static class Library implements TypeFilter {
+
+            @Override
+            public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
+                ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class);
+                return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a2eb7bab/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
index b64278c..ba18fa1 100755
--- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
+++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
@@ -431,7 +431,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu
      */
     @Override
     public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA,
-            boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) {
+            boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) {
         // TODO Auto-generated method stub
         return null;
     }


Mime
View raw message