cloudstack-commits mailing list archives

From h...@apache.org
Subject [1/5] git commit: updated refs/heads/master to 676b2d1
Date Thu, 24 Jul 2014 12:03:01 GMT
Repository: cloudstack
Updated Branches:
  refs/heads/master da55aff64 -> 676b2d156


Fix CID 1114609 Log the correct number
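
The one functional change buried in the re-indentation below is the debug line in the host-reservation release check: the guard tests vmsStoppingMigratingByHostId, but the message previously reported vms.size(), so the logged count could be wrong. A minimal, self-contained sketch of the corrected pattern follows (plain JDK types and illustrative values only, not the CloudStack API):

    import java.util.Arrays;
    import java.util.List;

    public class LogCorrectCount {
        public static void main(String[] args) {
            long hostId = 42L;  // illustrative host id
            // Broader list that the old message (incorrectly) counted.
            List<String> vms = Arrays.asList("vm-1", "vm-2", "vm-3");
            // List the condition actually checks.
            List<String> vmsStoppingMigratingByHostId = Arrays.asList("vm-2");
            if (vmsStoppingMigratingByHostId.size() > 0) {
                // Log the size of the collection that was tested, not a different one.
                System.out.println("Cannot release reservation, Found "
                        + vmsStoppingMigratingByHostId.size()
                        + " VMs stopping/migrating/starting on host " + hostId);
            }
        }
    }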


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/1440a1c6
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/1440a1c6
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/1440a1c6

Branch: refs/heads/master
Commit: 1440a1c6a062d562058d9421ec9b9ef66507d44c
Parents: ec43bfc
Author: Hugo Trippaers <htrippaers@schubergphilis.com>
Authored: Thu Jul 24 12:14:06 2014 +0200
Committer: Hugo Trippaers <htrippaers@schubergphilis.com>
Committed: Thu Jul 24 12:21:37 2014 +0200

----------------------------------------------------------------------
 .../deploy/DeploymentPlanningManagerImpl.java   | 249 ++++++++++---------
 1 file changed, 125 insertions(+), 124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1440a1c6/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
index bb94b76..b5ff5b8 100644
--- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
+++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
@@ -31,6 +31,8 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import org.apache.log4j.Logger;
+
 import org.apache.cloudstack.affinity.AffinityGroupProcessor;
 import org.apache.cloudstack.affinity.AffinityGroupService;
 import org.apache.cloudstack.affinity.AffinityGroupVMMapVO;
@@ -49,7 +51,6 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;
-import org.apache.log4j.Logger;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.Listener;
@@ -130,7 +131,7 @@ import com.cloud.vm.dao.VMInstanceDao;
 
 @Local(value = {DeploymentPlanningManager.class})
 public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener,
-        StateListener<State, VirtualMachine.Event, VirtualMachine> {
+StateListener<State, VirtualMachine.Event, VirtualMachine> {
 
     private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class);
     @Inject
@@ -276,14 +277,14 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
 
         ServiceOffering offering = vmProfile.getServiceOffering();
         if(planner == null){
-        String plannerName = offering.getDeploymentPlanner();
-        if (plannerName == null) {
-            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
-                plannerName = "BareMetalPlanner";
-            } else {
-                plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
+            String plannerName = offering.getDeploymentPlanner();
+            if (plannerName == null) {
+                if (vm.getHypervisorType() == HypervisorType.BareMetal) {
+                    plannerName = "BareMetalPlanner";
+                } else {
+                    plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
+                }
             }
-        }
             planner = getDeploymentPlannerByName(plannerName);
         }
 
@@ -294,7 +295,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
 
             s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" +
-                plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
+                    plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);

             s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
         }
@@ -314,13 +315,13 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             } else {
                 if (s_logger.isDebugEnabled()) {
                     s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " +
-                        host.getClusterId());
+                            host.getClusterId());
                 }
 
                 // search for storage under the zone, pod, cluster of the host.
                 DataCenterDeployment lastPlan =
-                    new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null,
-                        plan.getReservationContext());
+                        new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null,
+                                plan.getReservationContext());

                 Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                 Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
@@ -331,8 +332,8 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                     List<Host> suitableHosts = new ArrayList<Host>();
                     suitableHosts.add(host);
                     Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
-                        suitableHosts, suitableVolumeStoragePools, avoids,
-                        getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
+                            suitableHosts, suitableVolumeStoragePools, avoids,
+                            getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                     if (potentialResources != null) {
                         Pod pod = _podDao.findById(host.getPodId());
                         Cluster cluster = _clusterDao.findById(host.getClusterId());
@@ -363,7 +364,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                 s_logger.debug("The last host of this VM is in avoid set");
             } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
                 s_logger.debug("The last Host, hostId: " + host.getId() +
-                    " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
+                        " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
            } else if ((offeringDetails  = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
                ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
                if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){
@@ -379,64 +380,64 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                         }
                     }
                     if (hostTagsMatch) {
-                    long cluster_id = host.getClusterId();
+                        long cluster_id = host.getClusterId();
                         ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
                                 "cpuOvercommitRatio");
                         ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
                                 "memoryOvercommitRatio");
-                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
-                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+                        Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                        Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                        if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
                                cpuOvercommitRatio, memoryOvercommitRatio, true)
                                && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(),
                                         offering.getSpeed())) {
-                        s_logger.debug("The last host of this VM is UP and has enough capacity");
+                            s_logger.debug("The last host of this VM is UP and has enough capacity");
                            s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
                                     + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                             // search for storage under the zone, pod, cluster
                             // of
-                        // the last host.
+                            // the last host.
                             DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
                                    host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
                                     vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
-                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                        List<Volume> readyAndReusedVolumes = result.second();
+                            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
+                            List<Volume> readyAndReusedVolumes = result.second();
 
                             // choose the potential pool for this VM for this
                             // host
-                        if (!suitableVolumeStoragePools.isEmpty()) {
-                            List<Host> suitableHosts = new ArrayList<Host>();
-                            suitableHosts.add(host);
-                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
-                                suitableHosts, suitableVolumeStoragePools, avoids,
-                                getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
-                            if (potentialResources != null) {
-                                Pod pod = _podDao.findById(host.getPodId());
-                                Cluster cluster = _clusterDao.findById(host.getClusterId());
-                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                                // remove the reused vol<->pool from
+                            if (!suitableVolumeStoragePools.isEmpty()) {
+                                List<Host> suitableHosts = new ArrayList<Host>();
+                                suitableHosts.add(host);
+                                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
+                                        suitableHosts, suitableVolumeStoragePools, avoids,
+                                        getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
+                                if (potentialResources != null) {
+                                    Pod pod = _podDao.findById(host.getPodId());
+                                    Cluster cluster = _clusterDao.findById(host.getClusterId());
+                                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
+                                    // remove the reused vol<->pool from
                                     // destination, since we don't have to
                                     // prepare
-                                // this volume.
-                                for (Volume vol : readyAndReusedVolumes) {
-                                    storageVolMap.remove(vol);
-                                }
+                                    // this volume.
+                                    for (Volume vol : readyAndReusedVolumes) {
+                                        storageVolMap.remove(vol);
+                                    }
                                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host,
                                             storageVolMap);
-                                s_logger.debug("Returning Deployment Destination: " + dest);
-                                return dest;
+                                    s_logger.debug("Returning Deployment Destination: " + dest);
+                                    return dest;
+                                }
                             }
+                        } else {
+                            s_logger.debug("The last host of this VM does not have enough capacity");
                         }
                     } else {
-                        s_logger.debug("The last host of this VM does not have enough capacity");
-                    }
-                } else {
                        s_logger.debug("Service Offering host tag does not match the last host of this VM");
                     }
                 } else {
                    s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +
-                        host.getResourceState());
+                            host.getResourceState());
                 }
             }
             s_logger.debug("Cannot choose the last host to deploy this VM ");
@@ -450,21 +451,21 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                 if (planner instanceof DeploymentClusterPlanner) {
 
                     ExcludeList plannerAvoidInput =
-                        new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
-                            avoids.getPoolsToAvoid());
+                            new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
+                                    avoids.getPoolsToAvoid());
 
                    clusterList = ((DeploymentClusterPlanner)planner).orderClusters(vmProfile, plan, avoids);
 
                     if (clusterList != null && !clusterList.isEmpty()) {
                         // planner refactoring. call allocators to list hosts
                         ExcludeList plannerAvoidOutput =
-                            new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
-                                avoids.getPoolsToAvoid());
+                                new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
+                                        avoids.getPoolsToAvoid());
 
                         resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
 
                         dest =
-                            checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
+                                checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
                         if (dest != null) {
                             return dest;
                         }
@@ -591,7 +592,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
     }
 
     private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids)
-        throws InsufficientServerCapacityException {
+            throws InsufficientServerCapacityException {
         if (planner != null && planner instanceof DeploymentClusterPlanner) {
             return ((DeploymentClusterPlanner)planner).getResourceUsage(vmProfile, plan, avoids);
         } else {
@@ -619,7 +620,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                     return true;
                 } else {
                    s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " +
-                        hostResourceType);
+                            hostResourceType);
                     return false;
                 }
             } else {
@@ -629,27 +630,27 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                 return Transaction.execute(new TransactionCallback<Boolean>() {
                     @Override
                     public Boolean doInTransaction(TransactionStatus status) {
-                    final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
-                    if (lockedEntry == null) {
-                        s_logger.error("Unable to lock the host entry for reservation, host: " + hostId);
-                        return false;
-                    }
-                    // check before updating
-                    if (lockedEntry.getResourceUsage() == null) {
-                        lockedEntry.setResourceUsage(resourceUsageRequired);
-                        _plannerHostReserveDao.persist(lockedEntry);
-                        return true;
-                    } else {
-                        // someone updated it earlier. check if we can still use it
-                        if (lockedEntry.getResourceUsage() == resourceUsageRequired) {
+                        final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
+                        if (lockedEntry == null) {
+                            s_logger.error("Unable to lock the host entry for reservation, host: " + hostId);
+                            return false;
+                        }
+                        // check before updating
+                        if (lockedEntry.getResourceUsage() == null) {
+                            lockedEntry.setResourceUsage(resourceUsageRequired);
+                            _plannerHostReserveDao.persist(lockedEntry);
                             return true;
                         } else {
+                            // someone updated it earlier. check if we can still use it
+                            if (lockedEntry.getResourceUsage() == resourceUsageRequired) {
+                                return true;
+                            } else {
                                s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " +
-                                    hostResourceTypeFinal);
-                            return false;
+                                        hostResourceTypeFinal);
+                                return false;
+                            }
                         }
                     }
-                }
                 });
 
             }
@@ -695,7 +696,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                List<VMInstanceVO> vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting);
                 if (vmsStoppingMigratingByHostId.size() > 0) {
                     if (s_logger.isDebugEnabled()) {
-                        s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs stopping/migrating on host " + hostId);
+                        s_logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId);
                     }
                     return false;
                 }
@@ -721,20 +722,20 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                 return Transaction.execute(new TransactionCallback<Boolean>() {
                     @Override
                     public Boolean doInTransaction(TransactionStatus status) {
-                    final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
-                    if (lockedEntry == null) {
-                        s_logger.error("Unable to lock the host entry for reservation, host: " + hostId);
-                        return false;
-                    }
-                    // check before updating
-                    if (lockedEntry.getResourceUsage() != null) {
-                        lockedEntry.setResourceUsage(null);
-                        _plannerHostReserveDao.persist(lockedEntry);
-                        return true;
-                    }
+                        final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
+                        if (lockedEntry == null) {
+                            s_logger.error("Unable to lock the host entry for reservation, host: " + hostId);
+                            return false;
+                        }
+                        // check before updating
+                        if (lockedEntry.getResourceUsage() != null) {
+                            lockedEntry.setResourceUsage(null);
+                            _plannerHostReserveDao.persist(lockedEntry);
+                            return true;
+                        }
 
                         return false;
-                }
+                    }
                 });
             }
 
@@ -833,7 +834,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            public void onPublishMessage(String senderAddress, String subject, Object obj) {
                VMInstanceVO vm = ((VMInstanceVO)obj);
                s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() +
-                    ", checking if host reservation can be released for host:" + vm.getLastHostId());
+                        ", checking if host reservation can be released for host:" + vm.getLastHostId());
                 Long hostId = vm.getLastHostId();
                 checkHostReservationRelease(hostId);
             }
@@ -890,7 +891,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
 
     // /refactoring planner methods
    private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc,
-        DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) {
+            DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) {
 
         if (s_logger.isTraceEnabled()) {
             s_logger.trace("ClusterId List to consider: " + clusterList);
@@ -909,7 +910,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             // search for resources(hosts and storage) under this zone, pod,
             // cluster.
             DataCenterDeployment potentialPlan =
-                new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
+                    new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
 
             // find suitable hosts under this cluster, need as many hosts as we
             // get.
@@ -924,15 +925,15 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                 }
 
                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result =
-                    findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
+                        findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
                 Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                 List<Volume> readyAndReusedVolumes = result.second();
 
                 // choose the potential host and pool for the VM
                 if (!suitableVolumeStoragePools.isEmpty()) {
                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
-                        suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired,
-                        readyAndReusedVolumes);
+                            suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired,
+                            readyAndReusedVolumes);
 
                     if (potentialResources != null) {
                         Pod pod = _podDao.findById(clusterVO.getPodId());
@@ -965,7 +966,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
    private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput, VirtualMachineProfile vmProfile) {
 
         ExcludeList allocatorAvoidOutput =
-            new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
+                new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
 
         // remove any hosts/pools that the planners might have added
         // to get the list of hosts/pools that Allocators flagged as 'avoid'
@@ -977,7 +978,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         boolean avoidAllHosts = true, avoidAllPools = true;
 
         List<HostVO> allhostsInCluster =
-            _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), clusterVO.getPodId(), clusterVO.getDataCenterId(), null);
+                _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), clusterVO.getPodId(), clusterVO.getDataCenterId(), null);
         for (HostVO host : allhostsInCluster) {
             if (!allocatorAvoidOutput.shouldAvoid(host)) {
                 // there's some host in the cluster that is not yet in avoid set
@@ -1003,21 +1004,21 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             boolean vmRequiresLocalStorege = storageRequirements.second();
 
             if (vmRequiresSharedStorage) {
-            // check shared pools
+                // check shared pools
                List<StoragePoolVO> allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null);
-        for (StoragePoolVO pool : allPoolsInCluster) {
-            if (!allocatorAvoidOutput.shouldAvoid(pool)) {
-                // there's some pool in the cluster that is not yet in avoid set
-                avoidAllPools = false;
-                    break;
+                for (StoragePoolVO pool : allPoolsInCluster) {
+                    if (!allocatorAvoidOutput.shouldAvoid(pool)) {
+                        // there's some pool in the cluster that is not yet in avoid set
+                        avoidAllPools = false;
+                        break;
+                    }
                 }
             }
-            }
 
             if (vmRequiresLocalStorege) {
                 // check local pools
                 List<StoragePoolVO> allLocalPoolsInCluster =
-                    _storagePoolDao.findLocalStoragePoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null);
+                        _storagePoolDao.findLocalStoragePoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null);
                 for (StoragePoolVO pool : allLocalPoolsInCluster) {
                     if (!allocatorAvoidOutput.shouldAvoid(pool)) {
                         // there's some pool in the cluster that is not yet
@@ -1058,7 +1059,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
     }
 
    protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools,
-        ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, List<Volume> readyAndReusedVolumes) {
+            ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, List<Volume> readyAndReusedVolumes) {
        s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
 
         boolean hostCanAccessPool = false;
@@ -1116,7 +1117,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             }
            if (hostCanAccessPool && haveEnoughSpace && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) {
                s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() +
-                    " and associated storage pools for this VM");
+                        " and associated storage pools for this VM");
                return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
             } else {
                 avoid.addHost(potentialHost.getId());
@@ -1382,43 +1383,43 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         return Transaction.execute(new TransactionCallback<String>() {
             @Override
             public String doInTransaction(TransactionStatus status) {
-        boolean saveReservation = true;
-
-            if (vmGroupCount > 0) {
-                List<Long> groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId());
-                SearchCriteria<AffinityGroupVO> criteria = _affinityGroupDao.createSearchCriteria();
-                criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()]));
-                List<AffinityGroupVO> groups = _affinityGroupDao.lockRows(criteria,
null, true);
-
-                for (AffinityGroupProcessor processor : _affinityProcessors) {
-                    if (!processor.check(vmProfile, plannedDestination)) {
-                        saveReservation = false;
-                        break;
+                boolean saveReservation = true;
+
+                if (vmGroupCount > 0) {
+                    List<Long> groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId());
+                    SearchCriteria<AffinityGroupVO> criteria = _affinityGroupDao.createSearchCriteria();
+                    criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()]));
+                    _affinityGroupDao.lockRows(criteria, null, true);
+
+                    for (AffinityGroupProcessor processor : _affinityProcessors) {
+                        if (!processor.check(vmProfile, plannedDestination)) {
+                            saveReservation = false;
+                            break;
+                        }
                     }
                 }
-            }
 
-            if (saveReservation) {
+                if (saveReservation) {
                     VMReservationVO vmReservation =
-                        new VMReservationVO(vm.getId(), plannedDestination.getDataCenter().getId(), plannedDestination.getPod().getId(), plannedDestination.getCluster()
-                            .getId(), plannedDestination.getHost().getId());
+                            new VMReservationVO(vm.getId(), plannedDestination.getDataCenter().getId(), plannedDestination.getPod().getId(), plannedDestination.getCluster()
+                                    .getId(), plannedDestination.getHost().getId());
                     if (planner != null) {
                         vmReservation.setDeploymentPlanner(planner.getName());
                     }
-                Map<Long, Long> volumeReservationMap = new HashMap<Long, Long>();
+                    Map<Long, Long> volumeReservationMap = new HashMap<Long, Long>();
 
-                if (vm.getHypervisorType() != HypervisorType.BareMetal) {
-                    for (Volume vo : plannedDestination.getStorageForDisks().keySet()) {
-                        volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId());
+                    if (vm.getHypervisorType() != HypervisorType.BareMetal) {
+                        for (Volume vo : plannedDestination.getStorageForDisks().keySet()) {
+                            volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId());
+                        }
+                        vmReservation.setVolumeReservation(volumeReservationMap);
                     }
-                    vmReservation.setVolumeReservation(volumeReservationMap);
+                    _reservationDao.persist(vmReservation);
+                    return vmReservation.getUuid();
                 }
-                _reservationDao.persist(vmReservation);
-                return vmReservation.getUuid();
-            }
 
-        return null;
-    }
+                return null;
+            }
         });
     }
 

