From: likithas@apache.org
To: commits@cloudstack.apache.org
Subject: git commit: updated refs/heads/master to 493fd17
Date: Tue, 22 Jul 2014 09:56:30 +0000 (UTC)
Message-Id: <080dcfcf2b2c4939bad350c4e59e4d70@git.apache.org>

Repository: cloudstack
Updated Branches:
  refs/heads/master b3e4c6d6d -> 493fd1705


CLOUDSTACK-7152. Attaching a data disk to a VM that has VM snapshots throws 'Unexpected exception'.
As with other API commands, ensure that input parameter validation for AttachVolume happens outside the job queue.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/493fd170
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/493fd170
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/493fd170

Branch: refs/heads/master
Commit: 493fd170543658ac7602e230a336985c59e8e1a1
Parents: b3e4c6d
Author: Likitha Shetty
Authored: Mon Jul 14 15:04:37 2014 +0530
Committer: Likitha Shetty
Committed: Tue Jul 22 15:13:37 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/storage/VolumeApiServiceImpl.java | 193 ++++++++++---------
 1 file changed, 98 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/493fd170/server/src/com/cloud/storage/VolumeApiServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
index 64d6bc0..ac2a881 100644
--- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
@@ -1141,50 +1141,77 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
     @Override
     public Volume attachVolumeToVM(AttachVolumeCmd command) {
+        return attachVolumeToVM(command.getVirtualMachineId(), command.getId(), command.getDeviceId());
+    }
 
-        AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
-        if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
-            // avoid re-entrance
-
-            VmWorkJobVO placeHolder = null;
-            if (VmJobEnabled.value()) {
-                placeHolder = createPlaceHolderWork(command.getVirtualMachineId());
+    private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long deviceId) {
+        VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
+        UserVmVO vm = _userVmDao.findById(vmId);
+        VolumeVO exstingVolumeOfVm = null;
+        List<VolumeVO> rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
+        if (rootVolumesOfVm.size() > 1) {
+            throw new CloudRuntimeException("The VM " + vm.getHostName() + " has more than one ROOT volume and is in an invalid state.");
+        } else {
+            if (!rootVolumesOfVm.isEmpty()) {
+                exstingVolumeOfVm = rootVolumesOfVm.get(0);
+            } else {
+                // locate data volume of the vm
+                List<VolumeVO> diskVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
+                for (VolumeVO diskVolume : diskVolumesOfVm) {
+                    if (diskVolume.getState() != Volume.State.Allocated) {
+                        exstingVolumeOfVm = diskVolume;
+                        break;
+                    }
+                }
             }
+        }
+
+        HypervisorType rootDiskHyperType = vm.getHypervisorType();
+        HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
+
+        VolumeInfo newVolumeOnPrimaryStorage = volumeToAttach;
+
+        //don't create volume on primary storage if its being attached to the vm which Root's volume hasn't been created yet
+        StoragePoolVO destPrimaryStorage = null;
+        if (exstingVolumeOfVm != null && !exstingVolumeOfVm.getState().equals(Volume.State.Allocated)) {
+            destPrimaryStorage = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
+        }
+
+        if (destPrimaryStorage != null && (volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Uploaded)) {
             try {
-                return orchestrateAttachVolumeToVM(command.getVirtualMachineId(), command.getId(), command.getDeviceId());
-            } finally {
-                if (VmJobEnabled.value())
-                    _workJobDao.expunge(placeHolder.getId());
+                newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, rootDiskHyperType, destPrimaryStorage);
+            } catch (NoTransitionException e) {
+                s_logger.debug("Failed to create volume on primary storage", e);
+                throw new CloudRuntimeException("Failed to create volume on primary storage", e);
             }
+        }
 
-        } else {
-            Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(command.getVirtualMachineId(), command.getId(), command.getDeviceId());
+        // reload the volume from db
+        newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId());
+        boolean moveVolumeNeeded = needMoveVolume(exstingVolumeOfVm, newVolumeOnPrimaryStorage);
 
-            Volume vol = null;
-            try {
-                outcome.get();
-            } catch (InterruptedException e) {
-                throw new RuntimeException("Operation is interrupted", e);
-            } catch (java.util.concurrent.ExecutionException e) {
-                throw new RuntimeException("Execution excetion", e);
+        if (moveVolumeNeeded) {
+            PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
+            if (primaryStore.isLocal()) {
+                throw new CloudRuntimeException("Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName()
+                        + " as migration of local data volume is not allowed");
             }
+            StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
 
-            Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
-            if (jobResult != null) {
-                if (jobResult instanceof ConcurrentOperationException)
-                    throw (ConcurrentOperationException)jobResult;
-                else if (jobResult instanceof Throwable)
-                    throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
-                else if (jobResult instanceof Long) {
-                    vol = _volsDao.findById((Long) jobResult);
-                }
+            try {
+                newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(),
+                        vmRootVolumePool.getClusterId(), volumeToAttachHyperType);
+            } catch (ConcurrentOperationException e) {
+                s_logger.debug("move volume failed", e);
+                throw new CloudRuntimeException("move volume failed", e);
+            } catch (StorageUnavailableException e) {
+                s_logger.debug("move volume failed", e);
+                throw new CloudRuntimeException("move volume failed", e);
             }
-            return vol;
         }
-    }
-
-    private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long deviceId) {
-        return attachVolumeToVM(vmId, volumeId, deviceId);
+        VolumeVO newVol = _volsDao.findById(newVolumeOnPrimaryStorage.getId());
+        newVol = sendAttachVolumeCommand(vm, newVol, deviceId);
+        return newVol;
     }
 
     @ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true)
@@ -1275,25 +1302,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state");
         }
 
-        VolumeVO exstingVolumeOfVm = null;
-        List<VolumeVO> rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
-        if (rootVolumesOfVm.size() > 1) {
-            throw new CloudRuntimeException("The VM " + vm.getHostName() + " has more than one ROOT volume and is in an invalid state.");
-        } else {
-            if (!rootVolumesOfVm.isEmpty()) {
-                exstingVolumeOfVm = rootVolumesOfVm.get(0);
-            } else {
-                // locate data volume of the vm
-                List<VolumeVO> diskVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
-                for (VolumeVO diskVolume : diskVolumesOfVm) {
-                    if (diskVolume.getState() != Volume.State.Allocated) {
-                        exstingVolumeOfVm = diskVolume;
-                        break;
-                    }
-                }
-            }
-        }
-
         HypervisorType rootDiskHyperType = vm.getHypervisorType();
         HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
 
@@ -1307,62 +1315,57 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             }
         }
 
-        VolumeInfo newVolumeOnPrimaryStorage = volumeToAttach;
+        AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
 
-        //don't create volume on primary storage if its being attached to the vm which Root's volume hasn't been created yet
-        StoragePoolVO destPrimaryStorage = null;
-        if (exstingVolumeOfVm != null && !exstingVolumeOfVm.getState().equals(Volume.State.Allocated)) {
-            destPrimaryStorage = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
-        }
+        if (asyncExecutionContext != null) {
+            AsyncJob job = asyncExecutionContext.getJob();
 
-        if (destPrimaryStorage != null && (volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Uploaded)) {
-            try {
-                newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, rootDiskHyperType, destPrimaryStorage);
-            } catch (NoTransitionException e) {
-                s_logger.debug("Failed to create volume on primary storage", e);
-                throw new CloudRuntimeException("Failed to create volume on primary storage", e);
+            if (s_logger.isInfoEnabled()) {
+                s_logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
             }
+
+            _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
         }
 
-        // reload the volume from db
-        newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId());
-        boolean moveVolumeNeeded = needMoveVolume(exstingVolumeOfVm, newVolumeOnPrimaryStorage);
+        AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
+        if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
+            // avoid re-entrance
 
-        if (moveVolumeNeeded) {
-            PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
-            if (primaryStore.isLocal()) {
-                throw new CloudRuntimeException("Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName()
-                        + " as migration of local data volume is not allowed");
+            VmWorkJobVO placeHolder = null;
+            if (VmJobEnabled.value()) {
+                placeHolder = createPlaceHolderWork(vmId);
             }
-            StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
-
             try {
-                newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(),
-                        vmRootVolumePool.getClusterId(), volumeToAttachHyperType);
-            } catch (ConcurrentOperationException e) {
-                s_logger.debug("move volume failed", e);
-                throw new CloudRuntimeException("move volume failed", e);
-            } catch (StorageUnavailableException e) {
-                s_logger.debug("move volume failed", e);
-                throw new CloudRuntimeException("move volume failed", e);
+                return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
+            } finally {
+                if (VmJobEnabled.value())
+                    _workJobDao.expunge(placeHolder.getId());
             }
-        }
 
-        AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
-
-        if (asyncExecutionContext != null) {
-            AsyncJob job = asyncExecutionContext.getJob();
+        } else {
+            Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId);
 
-            if (s_logger.isInfoEnabled()) {
-                s_logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
+            Volume vol = null;
+            try {
+                outcome.get();
+            } catch (InterruptedException e) {
+                throw new RuntimeException("Operation is interrupted", e);
+            } catch (java.util.concurrent.ExecutionException e) {
+                throw new RuntimeException("Execution excetion", e);
            }
 
-            _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
+            Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
+            if (jobResult != null) {
+                if (jobResult instanceof ConcurrentOperationException)
+                    throw (ConcurrentOperationException)jobResult;
+                else if (jobResult instanceof Throwable)
+                    throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
+                else if (jobResult instanceof Long) {
+                    vol = _volsDao.findById((Long)jobResult);
+                }
+            }
+            return vol;
         }
-
-        VolumeVO newVol = _volsDao.findById(newVolumeOnPrimaryStorage.getId());
-        newVol = sendAttachVolumeCommand(vm, newVol, deviceId);
-        return newVol;
     }
 
     @Override
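
The change above boils down to one pattern: the public AttachVolume entry point now validates its inputs synchronously, before anything is handed to the VM work job queue, and only the already-validated orchestration step (orchestrateAttachVolumeToVM) runs inside the job. The minimal Java sketch below illustrates that validate-then-enqueue pattern in isolation; the class, fields, and checks (AttachVolumeSketch, the in-memory maps, the jobQueue executor) are simplified stand-ins invented for illustration, not CloudStack's actual DAOs, exceptions, or async job framework.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AttachVolumeSketch {

    static class InvalidParameterException extends RuntimeException {
        InvalidParameterException(String msg) {
            super(msg);
        }
    }

    // Stand-ins for the persistence layer: volume states and a per-VM snapshot flag.
    private final Map<Long, String> volumeStates = new ConcurrentHashMap<>();
    private final Map<Long, Boolean> vmHasSnapshot = new ConcurrentHashMap<>();

    // Stand-in for the async work job queue that runs the orchestration step.
    private final ExecutorService jobQueue = Executors.newSingleThreadExecutor();

    // Public entry point: every check runs here, synchronously, before a job is queued,
    // so a bad request fails fast with a clear parameter error instead of an opaque
    // failure unwrapped later from the job result.
    public Future<String> attachVolumeToVm(long vmId, long volumeId) {
        String state = volumeStates.get(volumeId);
        if (state == null) {
            throw new InvalidParameterException("Volume " + volumeId + " does not exist");
        }
        if (!state.equals("Allocated") && !state.equals("Ready") && !state.equals("Uploaded")) {
            throw new InvalidParameterException("Volume state must be Allocated, Ready or Uploaded");
        }
        if (Boolean.TRUE.equals(vmHasSnapshot.get(vmId))) {
            throw new InvalidParameterException("Cannot attach a volume to VM " + vmId + " while it has VM snapshots");
        }
        // Only a fully validated request reaches the job queue.
        return jobQueue.submit(() -> orchestrateAttach(vmId, volumeId));
    }

    // Orchestration runs inside the job and can assume its inputs are already sane.
    private String orchestrateAttach(long vmId, long volumeId) {
        volumeStates.put(volumeId, "Ready");
        return "Volume " + volumeId + " attached to VM " + vmId;
    }

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        AttachVolumeSketch service = new AttachVolumeSketch();
        service.volumeStates.put(7L, "Allocated");
        service.vmHasSnapshot.put(42L, false);
        System.out.println(service.attachVolumeToVm(42L, 7L).get());
        service.jobQueue.shutdown();
    }
}

With this ordering, a request that fails validation (for example, a VM that already has snapshots) is rejected immediately with a parameter error rather than surfacing later as an 'Unexpected exception' rethrown from the job result, which is the behavior CLOUDSTACK-7152 describes.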