From commits-return-70914-archive-asf-public=cust-asf.ponee.io@cloudstack.apache.org Sun Jan 14 19:36:04 2018
From: GitBox
To: commits@cloudstack.apache.org
Reply-To: dev@cloudstack.apache.org
Subject: [GitHub] rhtyd closed pull request #2298: CLOUDSTACK-9620: Enhancements for managed storage
Message-ID: <151595495425.21610.9520971964021762381.gitbox@gitbox.apache.org>

rhtyd closed pull request #2298: CLOUDSTACK-9620: Enhancements for managed storage
URL: https://github.com/apache/cloudstack/pull/2298

This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance:

diff --git a/api/src/com/cloud/agent/api/to/DiskTO.java b/api/src/com/cloud/agent/api/to/DiskTO.java
index f982844486a..7b3d10bc4db 100644
--- a/api/src/com/cloud/agent/api/to/DiskTO.java
+++ b/api/src/com/cloud/agent/api/to/DiskTO.java
@@ -27,6 +27,7 @@
     public static final String CHAP_INITIATOR_SECRET = "chapInitiatorSecret";
     public static final String CHAP_TARGET_USERNAME = "chapTargetUsername";
     public static final String CHAP_TARGET_SECRET = "chapTargetSecret";
+    public static final String SCSI_NAA_DEVICE_ID = "scsiNaaDeviceId";
     public static final String MANAGED = "managed";
     public static final String IQN = "iqn";
     public static final String STORAGE_HOST = "storageHost";
@@ -36,6 +37,9 @@
     public static final String PROTOCOL_TYPE = "protocoltype";
     public static final String PATH = "path";
     public static final String UUID = "uuid";
+    public static final String VMDK = "vmdk";
+    public static final String EXPAND_DATASTORE = "expandDatastore";
+    public static final String TEMPLATE_RESIGN = "templateResign";

     private DataTO data;
     private Long diskSeq;
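For orientation before the next file: the constants added to DiskTO above are keys into the per-disk details map that travels with a DiskTO (the handlePath() logic later in this diff reads details.get(DiskTO.MANAGED) from the same map). A minimal sketch of populating that map; the values are invented for illustration:

    Map<String, String> details = new HashMap<>();

    details.put(DiskTO.MANAGED, String.valueOf(true));
    details.put(DiskTO.SCSI_NAA_DEVICE_ID, "naa.600a098038304430");  // key added by this PR; value invented
    details.put(DiskTO.EXPAND_DATASTORE, String.valueOf(false));     // key added by this PR
    details.put(DiskTO.TEMPLATE_RESIGN, String.valueOf(true));       // key added by this PR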
diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java
index 4ec94494ac1..8eea632a72e 100644
--- a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java
@@ -78,6 +78,14 @@
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////

+    public ResizeVolumeCmd() {}
+
+    public ResizeVolumeCmd(Long id, Long minIops, Long maxIops) {
+        this.id = id;
+        this.minIops = minIops;
+        this.maxIops = maxIops;
+    }
+
     //TODO use the method getId() instead of this one.
     public Long getEntityId() {
         return id;
diff --git a/client/pom.xml b/client/pom.xml
index cfc1f873826..2d6a2f83d38 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -1151,6 +1151,21 @@
+    <profile>
+      <id>vmwaresioc</id>
+      <activation>
+        <property>
+          <name>noredist</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.cloudstack</groupId>
+          <artifactId>cloud-plugin-api-vmware-sioc</artifactId>
+          <version>${project.version}</version>
+        </dependency>
+      </dependencies>
+    </profile>
     <profile>
       <id>quickcloud</id>
diff --git a/core/src/com/cloud/agent/api/MigrateCommand.java b/core/src/com/cloud/agent/api/MigrateCommand.java
index 9d1f83ac03f..3e7dfc1b1be 100644
--- a/core/src/com/cloud/agent/api/MigrateCommand.java
+++ b/core/src/com/cloud/agent/api/MigrateCommand.java
@@ -19,15 +19,20 @@
 package com.cloud.agent.api;

+import java.util.HashMap;
+import java.util.Map;
+
 import com.cloud.agent.api.to.VirtualMachineTO;

 public class MigrateCommand extends Command {
-    String vmName;
-    String destIp;
-    String hostGuid;
-    boolean isWindows;
-    VirtualMachineTO vmTO;
-    boolean executeInSequence = false;
+    private String vmName;
+    private String destIp;
+    private Map<String, MigrateDiskInfo> migrateStorage;
+    private boolean autoConvergence;
+    private String hostGuid;
+    private boolean isWindows;
+    private VirtualMachineTO vmTO;
+    private boolean executeInSequence = false;

     protected MigrateCommand() {
     }
@@ -40,6 +45,22 @@ public MigrateCommand(String vmName, String destIp, boolean isWindows, VirtualMa
         this.executeInSequence = executeInSequence;
     }

+    public void setMigrateStorage(Map<String, MigrateDiskInfo> migrateStorage) {
+        this.migrateStorage = migrateStorage;
+    }
+
+    public Map<String, MigrateDiskInfo> getMigrateStorage() {
+        return migrateStorage != null ? new HashMap<>(migrateStorage) : new HashMap<String, MigrateDiskInfo>();
+    }
+
+    public void setAutoConvergence(boolean autoConvergence) {
+        this.autoConvergence = autoConvergence;
+    }
+
+    public boolean isAutoConvergence() {
+        return autoConvergence;
+    }
+
     public boolean isWindows() {
         return isWindows;
     }
@@ -68,4 +89,67 @@ public String getHostGuid() {
     public boolean executeInSequence() {
         return executeInSequence;
     }
+
+    public static class MigrateDiskInfo {
+        public enum DiskType {
+            FILE, BLOCK;
+
+            @Override
+            public String toString() {
+                return name().toLowerCase();
+            }
+        }
+
+        public enum DriverType {
+            QCOW2, RAW;
+
+            @Override
+            public String toString() {
+                return name().toLowerCase();
+            }
+        }
+
+        public enum Source {
+            FILE, DEV;
+
+            @Override
+            public String toString() {
+                return name().toLowerCase();
+            }
+        }
+
+        private final String serialNumber;
+        private final DiskType diskType;
+        private final DriverType driverType;
+        private final Source source;
+        private final String sourceText;
+
+        public MigrateDiskInfo(final String serialNumber, final DiskType diskType, final DriverType driverType, final Source source, final String sourceText) {
+            this.serialNumber = serialNumber;
+            this.diskType = diskType;
+            this.driverType = driverType;
+            this.source = source;
+            this.sourceText = sourceText;
+        }
+
+        public String getSerialNumber() {
+            return serialNumber;
+        }
+
+        public DiskType getDiskType() {
+            return diskType;
+        }
+
+        public DriverType getDriverType() {
+            return driverType;
+        }
+
+        public Source getSource() {
+            return source;
+        }
+
+        public String getSourceText() {
+            return sourceText;
+        }
+    }
 }
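The new MigrateCommand fields carry per-disk storage-migration metadata and the KVM auto-convergence flag. A hypothetical caller-side sketch, where mc is a MigrateCommand built as elsewhere in this diff and the serial number and device path are invented:

    Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();

    migrateStorage.put("volume-serial-or-path", new MigrateCommand.MigrateDiskInfo(
            "6c3f2a18",                                       // invented serial number
            MigrateCommand.MigrateDiskInfo.DiskType.BLOCK,
            MigrateCommand.MigrateDiskInfo.DriverType.RAW,
            MigrateCommand.MigrateDiskInfo.Source.DEV,
            "/dev/mapper/example"));                          // invented source text

    mc.setMigrateStorage(migrateStorage);
    mc.setAutoConvergence(true);

Note that getMigrateStorage() returns a defensive copy of the map, so callers must set the complete map rather than mutate the returned one.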
diff --git a/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java b/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java
index c192e4a0dfa..0c2afcf36a4 100644
--- a/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java
+++ b/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java
@@ -19,5 +19,16 @@
 package com.cloud.agent.api;

+import java.util.List;
+
 public class ModifyTargetsAnswer extends Answer {
+    private List<String> connectedPaths;
+
+    public void setConnectedPaths(List<String> connectedPaths) {
+        this.connectedPaths = connectedPaths;
+    }
+
+    public List<String> getConnectedPaths() {
+        return connectedPaths;
+    }
 }
diff --git a/core/src/com/cloud/agent/api/ModifyTargetsCommand.java b/core/src/com/cloud/agent/api/ModifyTargetsCommand.java
index 424d7974952..9f4935bf021 100644
--- a/core/src/com/cloud/agent/api/ModifyTargetsCommand.java
+++ b/core/src/com/cloud/agent/api/ModifyTargetsCommand.java
@@ -23,7 +23,11 @@
 import java.util.Map;

 public class ModifyTargetsCommand extends Command {
+    public enum TargetTypeToRemove { BOTH, NEITHER, STATIC, DYNAMIC }
+
     public static final String IQN = "iqn";
+    public static final String STORAGE_TYPE = "storageType";
+    public static final String STORAGE_UUID = "storageUuid";
     public static final String STORAGE_HOST = "storageHost";
     public static final String STORAGE_PORT = "storagePort";
     public static final String CHAP_NAME = "chapName";
@@ -32,6 +36,9 @@
     public static final String MUTUAL_CHAP_SECRET = "mutualChapSecret";

     private boolean add;
+    private boolean applyToAllHostsInCluster;
+    private TargetTypeToRemove targetTypeToRemove = TargetTypeToRemove.BOTH;
+    private boolean removeAsync;
     private List<Map<String, String>> targets;

     public void setAdd(boolean add) {
@@ -42,6 +49,30 @@ public boolean getAdd() {
         return add;
     }

+    public void setApplyToAllHostsInCluster(boolean applyToAllHostsInCluster) {
+        this.applyToAllHostsInCluster = applyToAllHostsInCluster;
+    }
+
+    public boolean getApplyToAllHostsInCluster() {
+        return applyToAllHostsInCluster;
+    }
+
+    public void setTargetTypeToRemove(TargetTypeToRemove targetTypeToRemove) {
+        this.targetTypeToRemove = targetTypeToRemove;
+    }
+
+    public TargetTypeToRemove getTargetTypeToRemove() {
+        return targetTypeToRemove;
+    }
+
+    public void setRemoveAsync(boolean removeAsync) {
+        this.removeAsync = removeAsync;
+    }
+
+    public boolean isRemoveAsync() {
+        return removeAsync;
+    }
+
     public void setTargets(List<Map<String, String>> targets) {
         this.targets = targets;
     }
diff --git a/core/src/com/cloud/agent/api/PrepareForMigrationCommand.java b/core/src/com/cloud/agent/api/PrepareForMigrationCommand.java
index 6b89654e604..a2c4f67026c 100644
--- a/core/src/com/cloud/agent/api/PrepareForMigrationCommand.java
+++ b/core/src/com/cloud/agent/api/PrepareForMigrationCommand.java
@@ -22,7 +22,8 @@
 import com.cloud.agent.api.to.VirtualMachineTO;

 public class PrepareForMigrationCommand extends Command {
-    VirtualMachineTO vm;
+    private VirtualMachineTO vm;
+    private boolean rollback;

     protected PrepareForMigrationCommand() {
     }
@@ -35,6 +36,14 @@ public VirtualMachineTO getVirtualMachine() {
         return vm;
     }

+    public void setRollback(boolean rollback) {
+        this.rollback = rollback;
+    }
+
+    public boolean isRollback() {
+        return rollback;
+    }
+
     @Override
     public boolean executeInSequence() {
         return true;
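The ModifyTargetsCommand additions let the server ask every host in a cluster to drop its dynamically discovered iSCSI targets; removeDynamicTargets() in VirtualMachineManagerImpl, further down in this diff, builds exactly this structure. A sketch with invented host/IQN values:

    ModifyTargetsCommand cmd = new ModifyTargetsCommand();

    Map<String, String> target = new HashMap<>();

    target.put(ModifyTargetsCommand.STORAGE_HOST, "10.0.0.5");                 // invented
    target.put(ModifyTargetsCommand.STORAGE_PORT, "3260");
    target.put(ModifyTargetsCommand.IQN, "iqn.2010-01.com.example:volume-1");  // invented

    cmd.setTargets(Collections.singletonList(target));
    cmd.setApplyToAllHostsInCluster(true);
    cmd.setAdd(false);
    cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);

On the agent side, the matching ModifyTargetsAnswer can report the resulting device paths through setConnectedPaths()/getConnectedPaths().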
diff --git a/core/src/com/cloud/agent/api/StartAnswer.java b/core/src/com/cloud/agent/api/StartAnswer.java
index 6f63f20b010..d5d0e83b634 100644
--- a/core/src/com/cloud/agent/api/StartAnswer.java
+++ b/core/src/com/cloud/agent/api/StartAnswer.java
@@ -24,9 +24,16 @@
 import com.cloud.agent.api.to.VirtualMachineTO;

 public class StartAnswer extends Answer {
+    public static final String PATH = "path";
+    public static final String IMAGE_FORMAT = "imageFormat";
+
     VirtualMachineTO vm;
     String hostGuid;
-    Map<String, String> _iqnToPath;
+    // key = an applicable IQN (ex. iqn.1998-01.com.vmware.iscsi:name1)
+    // value = a Map with the following data:
+    //   key = PATH or IMAGE_FORMAT (defined above)
+    //   value = Example if PATH is key: UUID of VDI; Example if IMAGE_FORMAT is key: DiskTO.VHD
+    private Map<String, Map<String, String>> _iqnToData;

     protected StartAnswer() {
     }
@@ -61,11 +68,11 @@ public String getHost_guid() {
         return hostGuid;
     }

-    public void setIqnToPath(Map<String, String> iqnToPath) {
-        _iqnToPath = iqnToPath;
+    public void setIqnToData(Map<String, Map<String, String>> iqnToData) {
+        _iqnToData = iqnToData;
     }

-    public Map<String, String> getIqnToPath() {
-        return _iqnToPath;
+    public Map<String, Map<String, String>> getIqnToData() {
+        return _iqnToData;
     }
 }
diff --git a/core/src/com/cloud/agent/api/StopCommand.java b/core/src/com/cloud/agent/api/StopCommand.java
index e1d68f84661..eedf736ac4c 100644
--- a/core/src/com/cloud/agent/api/StopCommand.java
+++ b/core/src/com/cloud/agent/api/StopCommand.java
@@ -22,6 +22,10 @@
 import com.cloud.agent.api.to.GPUDeviceTO;
 import com.cloud.vm.VirtualMachine;

+import java.util.ArrayList;
+import java.util.Map;
+import java.util.List;
+
 public class StopCommand extends RebootCommand {
     private boolean isProxy = false;
     private String urlPort = null;
@@ -30,6 +34,11 @@
     boolean checkBeforeCleanup = false;
     String controlIp = null;
     boolean forceStop = false;
+    /**
+     * On KVM when using iSCSI-based managed storage, if the user shuts a VM down from the guest OS (as opposed to doing so from CloudStack),
+     * we need to pass to the KVM agent a list of applicable iSCSI volumes that need to be disconnected.
+     */
+    private List<Map<String, String>> volumesToDisconnect = new ArrayList<>();

     protected StopCommand() {
     }
@@ -102,4 +111,12 @@ public void setControlIp(String controlIp){
     public boolean isForceStop() {
         return forceStop;
     }
+
+    public void setVolumesToDisconnect(List<Map<String, String>> volumesToDisconnect) {
+        this.volumesToDisconnect = volumesToDisconnect;
+    }
+
+    public List<Map<String, String>> getVolumesToDisconnect() {
+        return volumesToDisconnect;
+    }
 }
diff --git a/core/src/com/cloud/agent/api/storage/CopyVolumeCommand.java b/core/src/com/cloud/agent/api/storage/CopyVolumeCommand.java
index a75e9ba3be2..863a9cad65b 100644
--- a/core/src/com/cloud/agent/api/storage/CopyVolumeCommand.java
+++ b/core/src/com/cloud/agent/api/storage/CopyVolumeCommand.java
@@ -19,18 +19,22 @@
 package com.cloud.agent.api.storage;

+import java.util.Map;
+
+import com.cloud.agent.api.to.DataTO;
 import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.storage.StoragePool;

 public class CopyVolumeCommand extends StorageNfsVersionCommand {
-
-    long volumeId;
-    String volumePath;
-    StorageFilerTO pool;
-    String secondaryStorageURL;
-    boolean toSecondaryStorage;
-    String vmName;
-    boolean executeInSequence = false;
+    private long volumeId;
+    private String volumePath;
+    private StorageFilerTO pool;
+    private String secondaryStorageURL;
+    private boolean toSecondaryStorage;
+    private String vmName;
+    private DataTO srcData;
+    private Map<String, String> srcDetails;
+    private boolean executeInSequence;

     public CopyVolumeCommand() {
     }
@@ -75,4 +79,19 @@ public String getVmName() {
         return vmName;
     }

+    public void setSrcData(DataTO srcData) {
+        this.srcData = srcData;
+    }
+
+    public DataTO getSrcData() {
+        return srcData;
+    }
+
+    public void setSrcDetails(Map<String, String> srcDetails) {
+        this.srcDetails = srcDetails;
+    }
+
+    public Map<String, String> getSrcDetails() {
+        return srcDetails;
+    }
 }
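StartAnswer's payload changes shape here: the flat IQN-to-path map becomes a two-level map that also carries the image format, as the comment above describes. A sketch of building one entry (the UUID is invented):

    Map<String, Map<String, String>> iqnToData = new HashMap<>();
    Map<String, String> data = new HashMap<>();

    data.put(StartAnswer.PATH, "f2d1b0aa-1234-5678-9abc-000000000000"); // e.g. UUID of a VDI (invented)
    data.put(StartAnswer.IMAGE_FORMAT, "VHD");

    iqnToData.put("iqn.1998-01.com.vmware.iscsi:name1", data);

    startAnswer.setIqnToData(iqnToData);

The handlePath() change in VirtualMachineManagerImpl, later in this diff, is the consumer of this structure.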
diff --git a/core/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java b/core/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java
index b409944b77f..77430c39808 100644
--- a/core/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java
+++ b/core/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java
@@ -19,19 +19,26 @@
 package com.cloud.agent.api.storage;

+import java.util.Map;
+
 import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.DataTO;
 import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.Volume;

 public class MigrateVolumeCommand extends Command {
-
     long volumeId;
     String volumePath;
     StorageFilerTO pool;
     String attachedVmName;
     Volume.Type volumeType;

+    private DataTO srcData;
+    private DataTO destData;
+    private Map<String, String> srcDetails;
+    private Map<String, String> destDetails;
+
     public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool, int timeout) {
         this.volumeId = volumeId;
         this.volumePath = volumePath;
@@ -48,6 +55,15 @@ public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool,
         this.setWait(timeout);
     }

+    public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {
+        this.srcData = srcData;
+        this.destData = destData;
+        this.srcDetails = srcDetails;
+        this.destDetails = destDetails;
+
+        setWait(timeout);
+    }
+
     @Override
     public boolean executeInSequence() {
         return true;
@@ -72,4 +88,24 @@ public String getAttachedVmName() {
     public Volume.Type getVolumeType() {
         return volumeType;
     }
+
+    public DataTO getSrcData() {
+        return srcData;
+    }
+
+    public DataTO getDestData() {
+        return destData;
+    }
+
+    public Map<String, String> getSrcDetails() {
+        return srcDetails;
+    }
+
+    public Map<String, String> getDestDetails() {
+        return destDetails;
+    }
+
+    public int getWaitInMillSeconds() {
+        return getWait() * 1000;
+    }
 }
\ No newline at end of file
diff --git a/core/src/com/cloud/storage/template/TemplateLocation.java b/core/src/com/cloud/storage/template/TemplateLocation.java
index d10d05ae971..c10acc10724 100644
--- a/core/src/com/cloud/storage/template/TemplateLocation.java
+++ b/core/src/com/cloud/storage/template/TemplateLocation.java
@@ -205,6 +205,7 @@ public boolean addFormat(FormatInfo newInfo) {
         }

         _props.setProperty("virtualsize", Long.toString(newInfo.virtualSize));
+        _props.setProperty("size", Long.toString(newInfo.size));
         _formats.add(newInfo);

         return true;
     }
diff --git a/core/src/org/apache/cloudstack/storage/command/CopyCommand.java b/core/src/org/apache/cloudstack/storage/command/CopyCommand.java
index e7ebab812c1..aac082a0133 100644
--- a/core/src/org/apache/cloudstack/storage/command/CopyCommand.java
+++ b/core/src/org/apache/cloudstack/storage/command/CopyCommand.java
@@ -29,8 +29,8 @@
     private DataTO destTO;
     private DataTO cacheTO;
     private boolean executeInSequence = false;
-    private Map<String, String> options = new HashMap<String, String>();
-    private Map<String, String> options2 = new HashMap<String, String>();
+    private Map<String, String> options = new HashMap<>();
+    private Map<String, String> options2 = new HashMap<>();

     public CopyCommand(final DataTO srcData, final DataTO destData, final int timeout, final boolean executeInSequence) {
         super();
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java
index 2cde5bdc155..f537d8f5202 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java
@@ -36,5 +36,9 @@
     /**
      * indicates that this driver supports the "cloneOfSnapshot" property of cloud.snapshot_details (for creating a volume from a volume)
      */
-    CAN_CREATE_VOLUME_FROM_VOLUME
+    CAN_CREATE_VOLUME_FROM_VOLUME,
+    /**
+     * indicates that this driver supports reverting a volume to a snapshot state
+     */
+    CAN_REVERT_VOLUME_TO_SNAPSHOT
 }
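At runtime this enum value surfaces through the driver's capabilities map, which the canHandle() logic in StorageSystemDataMotionStrategy (later in this diff) reads as string keys mapped to boolean-valued strings. A sketch of a driver advertising the new capability:

    Map<String, String> capabilities = new HashMap<>();

    capabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
    capabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());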
diff --git a/engine/components-api/src/com/cloud/storage/StorageManager.java b/engine/components-api/src/com/cloud/storage/StorageManager.java
index d314fb33bd6..530a7dea3cc 100644
--- a/engine/components-api/src/com/cloud/storage/StorageManager.java
+++ b/engine/components-api/src/com/cloud/storage/StorageManager.java
@@ -44,14 +44,54 @@
 import com.cloud.vm.VMInstanceVO;

 public interface StorageManager extends StorageService {
-    static final ConfigKey<Integer> StorageCleanupInterval = new ConfigKey<Integer>(Integer.class, "storage.cleanup.interval", "Advanced", "86400",
-            "The interval (in seconds) to wait before running the storage cleanup thread.", false, ConfigKey.Scope.Global, null);
-    static final ConfigKey<Integer> StorageCleanupDelay = new ConfigKey<Integer>(Integer.class, "storage.cleanup.delay", "Advanced", "86400",
-            "Determines how long (in seconds) to wait before actually expunging destroyed volumes. The default value = the default value of storage.cleanup.interval.", false, ConfigKey.Scope.Global, null);
-    static final ConfigKey<Boolean> StorageCleanupEnabled = new ConfigKey<Boolean>(Boolean.class, "storage.cleanup.enabled", "Advanced", "true",
-            "Enables/disables the storage cleanup thread.", false, ConfigKey.Scope.Global, null);
-    static final ConfigKey<Boolean> TemplateCleanupEnabled = new ConfigKey<Boolean>(Boolean.class, "storage.template.cleanup.enabled", "Storage", "true",
-            "Enable/disable template cleanup activity, only take effect when overall storage cleanup is enabled", false, ConfigKey.Scope.Global, null);
+    ConfigKey<Integer> StorageCleanupInterval = new ConfigKey<>(Integer.class,
+            "storage.cleanup.interval",
+            "Advanced",
+            "86400",
+            "The interval (in seconds) to wait before running the storage cleanup thread.",
+            false,
+            ConfigKey.Scope.Global,
+            null);
+    ConfigKey<Integer> StorageCleanupDelay = new ConfigKey<>(Integer.class,
+            "storage.cleanup.delay",
+            "Advanced",
+            "86400",
+            "Determines how long (in seconds) to wait before actually expunging destroyed volumes. The default value = the default value of storage.cleanup.interval.",
+            false,
+            ConfigKey.Scope.Global,
+            null);
+    ConfigKey<Boolean> StorageCleanupEnabled = new ConfigKey<>(Boolean.class,
+            "storage.cleanup.enabled",
+            "Advanced",
+            "true",
+            "Enables/disables the storage cleanup thread.",
+            false,
+            ConfigKey.Scope.Global,
+            null);
+    ConfigKey<Boolean> TemplateCleanupEnabled = new ConfigKey<>(Boolean.class,
+            "storage.template.cleanup.enabled",
+            "Storage",
+            "true",
+            "Enable/disable template cleanup activity, only take effect when overall storage cleanup is enabled",
+            false,
+            ConfigKey.Scope.Global,
+            null);
+    ConfigKey<Integer> KvmStorageOfflineMigrationWait = new ConfigKey<>(Integer.class,
+            "kvm.storage.offline.migration.wait",
+            "Storage",
+            "10800",
+            "Timeout in seconds for offline (non-live) storage migration to complete on KVM",
+            true,
+            ConfigKey.Scope.Global,
+            null);
+    ConfigKey<Integer> KvmStorageOnlineMigrationWait = new ConfigKey<>(Integer.class,
+            "kvm.storage.online.migration.wait",
+            "Storage",
+            "10800",
+            "Timeout in seconds for online (live) storage migration to complete on KVM (migrateVirtualMachineWithVolume)",
+            true,
+            ConfigKey.Scope.Global,
+            null);

     /**
      * Returns a comma separated list of tags for the specified storage pool
@@ -102,6 +142,8 @@
     Host updateSecondaryStorage(long secStorageId, String newUrl);

+    void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool);
+
     List<Long> getUpHostsInPool(long poolId);

     void cleanupSecondaryStorage(boolean recurring);
diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml
index c69e29c9970..dab3c389c0b 100755
--- a/engine/orchestration/pom.xml
+++ b/engine/orchestration/pom.xml
@@ -58,6 +58,11 @@
       <artifactId>cloud-utils</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-server</artifactId>
+      <version>${project.version}</version>
+    </dependency>
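The two KVM migration timeouts added to StorageManager above are read like any other CloudStack ConfigKey; a sketch assuming the standard ConfigKey accessor:

    int offlineWaitSeconds = StorageManager.KvmStorageOfflineMigrationWait.value(); // default 10800
    int onlineWaitSeconds = StorageManager.KvmStorageOnlineMigrationWait.value();   // default 10800

Both keys are marked dynamic (the true argument), so they can be changed without a management-server restart.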
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 9916728f2db..375b0910624 100755
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -39,6 +39,7 @@
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;

+import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;

 import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@@ -85,6 +86,7 @@
 import com.cloud.agent.api.ClusterVMMetaDataSyncCommand;
 import com.cloud.agent.api.Command;
 import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.agent.api.PingRoutingCommand;
 import com.cloud.agent.api.PlugNicAnswer;
 import com.cloud.agent.api.PlugNicCommand;
@@ -114,6 +116,7 @@
 import com.cloud.agent.manager.allocator.HostAllocator;
 import com.cloud.alert.AlertManager;
 import com.cloud.capacity.CapacityManager;
+import com.cloud.configuration.Config;
 import com.cloud.dc.ClusterDetailsDao;
 import com.cloud.dc.ClusterDetailsVO;
 import com.cloud.dc.DataCenter;
@@ -536,6 +539,8 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti
         final Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();

+        List<Map<String, String>> targets = getTargets(hostId, vm.getId());
+
         if (volumeExpungeCommands != null && volumeExpungeCommands.size() > 0 && hostId != null) {
             final Commands cmds = new Commands(Command.OnError.Stop);

@@ -563,6 +568,10 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti
         // Clean up volumes based on the vm's instance id
         volumeMgr.cleanupVolumes(vm.getId());

+        if (hostId != null && CollectionUtils.isNotEmpty(targets)) {
+            removeDynamicTargets(hostId, targets);
+        }
+
         final VirtualMachineGuru guru = getVmGuru(vm);
         guru.finalizeExpunge(vm);
         //remove the overcommit detials from the uservm details
@@ -599,6 +608,64 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti
     }

+    private List<Map<String, String>> getTargets(Long hostId, long vmId) {
+        List<Map<String, String>> targets = new ArrayList<>();
+
+        HostVO hostVO = _hostDao.findById(hostId);
+
+        if (hostVO == null || hostVO.getHypervisorType() != HypervisorType.VMware) {
+            return targets;
+        }
+
+        List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
+
+        if (CollectionUtils.isEmpty(volumes)) {
+            return targets;
+        }
+
+        for (VolumeVO volume : volumes) {
+            StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId());
+
+            if (storagePoolVO != null && storagePoolVO.isManaged()) {
+                Map<String, String> target = new HashMap<>();
+
+                target.put(ModifyTargetsCommand.STORAGE_HOST, storagePoolVO.getHostAddress());
+                target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
+                target.put(ModifyTargetsCommand.IQN, volume.get_iScsiName());
+
+                targets.add(target);
+            }
+        }
+
+        return targets;
+    }
+
+    private void removeDynamicTargets(long hostId, List<Map<String, String>> targets) {
+        ModifyTargetsCommand cmd = new ModifyTargetsCommand();
+
+        cmd.setTargets(targets);
+        cmd.setApplyToAllHostsInCluster(true);
+        cmd.setAdd(false);
+        cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
+
+        sendModifyTargetsCommand(cmd, hostId);
+    }
+
+    private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
+        Answer answer = _agentMgr.easySend(hostId, cmd);
+
+        if (answer == null) {
+            String msg = "Unable to get an answer to the modify targets command";
+
+            s_logger.warn(msg);
+        }
+        else if (!answer.getResult()) {
+            String msg = "Unable to modify target on the following host: " + hostId;
+
+            s_logger.warn(msg);
+        }
+    }
+
     @Override
     public boolean start() {
         // TODO, initial delay is hardcoded
@@ -1073,8 +1140,10 @@ public void orchestrateStart(final String vmUuid, final Map
-    private void handlePath(final DiskTO[] disks, final Map<String, String> iqnToPath) {
-        if (disks != null && iqnToPath != null) {
+    private void handlePath(final DiskTO[] disks, final Map<String, Map<String, String>> iqnToData) {
+        if (disks != null && iqnToData != null) {
             for (final DiskTO disk : disks) {
                 final Map<String, String> details = disk.getDetails();
                 final boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
@@ -1264,12 +1333,31 @@ private void handlePath(final DiskTO[] disks, final Map<String, String> iqnToPat
                 final Long volumeId = disk.getData().getId();
                 final VolumeVO volume = _volsDao.findById(volumeId);
                 final String iScsiName = volume.get_iScsiName();
-                final String path = iqnToPath.get(iScsiName);
-                if (path != null) {
-                    volume.setPath(path);
+                boolean update = false;
+
+                final Map<String, String> data = iqnToData.get(iScsiName);
+
+                if (data != null) {
+                    final String path = data.get(StartAnswer.PATH);
+
+                    if (path != null) {
+                        volume.setPath(path);
-                    _volsDao.update(volumeId, volume);
+                        update = true;
+                    }
+
+                    final String imageFormat = data.get(StartAnswer.IMAGE_FORMAT);
+
+                    if (imageFormat != null) {
+                        volume.setFormat(ImageFormat.valueOf(imageFormat));
+
+                        update = true;
+                    }
+
+                    if (update) {
+                        _volsDao.update(volumeId, volume);
+                    }
                 }
             }
         }
     }
@@ -1331,10 +1419,37 @@ public boolean getExecuteInSequence(final HypervisorType hypervisorType) {
         }
     }

+    private List<Map<String, String>> getVolumesToDisconnect(VirtualMachine vm) {
+        List<Map<String, String>> volumesToDisconnect = new ArrayList<>();
+
+        List<VolumeVO> volumes = _volsDao.findByInstance(vm.getId());
+
+        if (CollectionUtils.isEmpty(volumes)) {
+            return volumesToDisconnect;
+        }
+
+        for (VolumeVO volume : volumes) {
+            StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
+
+            if (storagePool != null && storagePool.isManaged()) {
+                Map<String, String> info = new HashMap<>(3);
+
+                info.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress());
+                info.put(DiskTO.STORAGE_PORT, String.valueOf(storagePool.getPort()));
+                info.put(DiskTO.IQN, volume.get_iScsiName());
+
+                volumesToDisconnect.add(info);
+            }
+        }
+
+        return volumesToDisconnect;
+    }
+
     protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final boolean force, final boolean checkBeforeCleanup) {
         final VirtualMachine vm = profile.getVirtualMachine();
         StopCommand stpCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), checkBeforeCleanup);
         stpCmd.setControlIp(getControlNicIpForVM(vm));
+        stpCmd.setVolumesToDisconnect(getVolumesToDisconnect(vm));
         final StopCommand stop = stpCmd;
         try {
             Answer answer = null;
@@ -2103,6 +2218,12 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
         try {
             final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
             final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
+
+            String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
+            boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
+
+            mc.setAutoConvergence(kvmAutoConvergence);
+
             mc.setHostGuid(dest.getHost().getGuid());

             try {
@@ -2176,32 +2297,48 @@
         final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();

         for (final VolumeVO volume : allVolumes) {
-            final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
+            final Long poolId = volumeToPool.get(volume.getId());
             final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
             final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
             final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());

             if (destPool != null) {
-                // Check if pool is accessible from the destination host and disk offering with which the volume was
-                // created is compliant with the pool type.
-                if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
-                    // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
-                            ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
-                            "the given pool.");
-                } else if (destPool.getId() == currentPool.getId()) {
-                    // If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
-                } else {
-                    volumeToPoolObjectMap.put(volume, destPool);
+                if (currentPool.isManaged()) {
+                    if (destPool.getId() == currentPool.getId()) {
+                        volumeToPoolObjectMap.put(volume, currentPool);
+                    }
+                    else {
+                        throw new CloudRuntimeException("Currently, a volume on managed storage can only be 'migrated' to itself.");
+                    }
+                }
+                else {
+                    // Check if pool is accessible from the destination host and disk offering with which the volume was
+                    // created is compliant with the pool type.
+                    if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
+                        // Cannot find a pool for the volume. Throw an exception.
+                        throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
+                                ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
+                                "the given pool.");
+                    } else if (destPool.getId() == currentPool.getId()) {
+                        // If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
+                    } else {
+                        volumeToPoolObjectMap.put(volume, destPool);
+                    }
                 }
             } else {
                 if (currentPool.isManaged()) {
-                    volumeToPoolObjectMap.put(volume, currentPool);
+                    if (currentPool.getScope() == ScopeType.ZONE) {
+                        volumeToPoolObjectMap.put(volume, currentPool);
+                    }
+                    else {
+                        throw new CloudRuntimeException("Currently, you can only 'migrate' a volume on managed storage if its storage pool is zone wide.");
+                    }
                 } else {
                     // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.

                     final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
-                    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
+                    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(),
+                            host.getId(), null, null);

                     final List<StoragePool> poolList = new ArrayList<>();
                     final ExcludeList avoid = new ExcludeList();
@@ -3588,6 +3725,12 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI
         try {
             final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
             final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
+
+            String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
+            boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
+
+            mc.setAutoConvergence(kvmAutoConvergence);
+
             mc.setHostGuid(dest.getHost().getGuid());

             try {
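Tying the stop path together: sendStop() above now attaches the managed-storage volumes the KVM agent must disconnect, each entry keyed by the DiskTO constants from earlier in this diff. A condensed sketch with invented values:

    Map<String, String> info = new HashMap<>(3);

    info.put(DiskTO.STORAGE_HOST, "10.0.0.5");                  // invented
    info.put(DiskTO.STORAGE_PORT, "3260");
    info.put(DiskTO.IQN, "iqn.2010-01.com.example:volume-1");   // invented

    stopCommand.setVolumesToDisconnect(Collections.singletonList(info));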
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index eccbfd650db..f02fdc495f9 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -610,16 +610,23 @@ public boolean volumeOnSharedStoragePool(Volume volume) {
     @Override
     public boolean volumeInactive(Volume volume) {
         Long vmId = volume.getInstanceId();
-        if (vmId != null) {
-            UserVm vm = _entityMgr.findById(UserVm.class, vmId);
-            if (vm == null) {
-                return true;
-            }
-            State state = vm.getState();
-            if (state.equals(State.Stopped) || state.equals(State.Destroyed)) {
-                return true;
-            }
+
+        if (vmId == null) {
+            return true;
+        }
+
+        UserVm vm = _entityMgr.findById(UserVm.class, vmId);
+
+        if (vm == null) {
+            return true;
         }
+
+        State state = vm.getState();
+
+        if (state.equals(State.Stopped) || state.equals(State.Destroyed)) {
+            return true;
+        }
+
         return false;
     }

@@ -1274,8 +1281,9 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
         VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);

         Long templateId = newVol.getTemplateId();
         for (int i = 0; i < 2; i++) {
-            // retry one more time in case of template reload is required for Vmware case
-            AsyncCallFuture<VolumeApiResult> future = null;
+            // retry one more time in case of template reload is required for VMware case
+            AsyncCallFuture<VolumeApiResult> future;
+
             if (templateId == null) {
                 DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
                 HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
@@ -1368,23 +1376,34 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
         List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
         Volume vol = null;
-        StoragePool pool = null;
+        StoragePool pool;
+
         for (VolumeTask task : tasks) {
             if (task.type == VolumeTaskType.NOP) {
                 pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
+
+                if (task.pool != null && task.pool.isManaged()) {
+                    long hostId = vm.getVirtualMachine().getHostId();
+                    Host host = _hostDao.findById(hostId);
+
+                    volService.grantAccess(volFactory.getVolume(task.volume.getId()), host, (DataStore)pool);
+                }
+
                 vol = task.volume;
+
                 // For a zone-wide managed storage, it is possible that the VM can be started in another
-                // cluster. In that case make sure that the volume in in the right access group cluster.
+                // cluster. In that case, make sure that the volume is in the right access group.
                 if (pool.isManaged()) {
                     long oldHostId = vm.getVirtualMachine().getLastHostId();
                     long hostId = vm.getVirtualMachine().getHostId();
+
                     if (oldHostId != hostId) {
                         Host oldHost = _hostDao.findById(oldHostId);
                         Host host = _hostDao.findById(hostId);
                         DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);

+                        storageMgr.removeStoragePoolFromCluster(oldHostId, vol.get_iScsiName(), pool);
+
                         volService.revokeAccess(volFactory.getVolume(vol.getId()), oldHost, storagePool);
-                        volService.grantAccess(volFactory.getVolume(vol.getId()), host, storagePool);
                     }
                 }
             } else if (task.type == VolumeTaskType.MIGRATE) {
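The reworked prepare() flow above grants access for the current host up front and, when the VM comes up on a different host than it last ran on, removes the storage pool from the old host and revokes that host's access (the unconditional re-grant is gone). A condensed sketch using the names from the diff:

    volService.grantAccess(volFactory.getVolume(task.volume.getId()), host, (DataStore)pool);

    if (pool.isManaged() && oldHostId != hostId) {
        DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);

        storageMgr.removeStoragePoolFromCluster(oldHostId, vol.get_iScsiName(), pool);
        volService.revokeAccess(volFactory.getVolume(vol.getId()), oldHost, storagePool);
    }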
diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
index 2b722906043..30cff850c88 100644
--- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
@@ -19,6 +19,16 @@
 package org.apache.cloudstack.storage.motion;

 import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.CopyVolumeAnswer;
+import com.cloud.agent.api.storage.CopyVolumeCommand;
+import com.cloud.agent.api.MigrateAnswer;
+import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.ModifyTargetsAnswer;
+import com.cloud.agent.api.ModifyTargetsCommand;
+import com.cloud.agent.api.PrepareForMigrationCommand;
+import com.cloud.agent.api.storage.MigrateVolumeAnswer;
+import com.cloud.agent.api.storage.MigrateVolumeCommand;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
 import com.cloud.agent.api.to.DiskTO;
@@ -33,24 +43,35 @@
 import com.cloud.host.dao.HostDao;
 import com.cloud.host.dao.HostDetailsDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.server.ManagementService;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.DiskOfferingVO;
 import com.cloud.storage.Snapshot;
 import com.cloud.storage.SnapshotVO;
 import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.GuestOSCategoryDao;
+import com.cloud.storage.dao.GuestOSDao;
 import com.cloud.storage.dao.SnapshotDao;
 import com.cloud.storage.dao.SnapshotDetailsDao;
 import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.dao.VolumeDetailsDao;
 import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.db.GlobalLock;
 import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+
 import com.google.common.base.Preconditions;
+
 import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@@ -81,46 +102,58 @@
 import org.apache.cloudstack.storage.command.ResignatureAnswer;
 import org.apache.cloudstack.storage.command.ResignatureCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
 import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.apache.log4j.Logger;
+
 import org.springframework.stereotype.Component;

 import javax.inject.Inject;
+
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.concurrent.ExecutionException;
+import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;

 @Component
 public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
     private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class);
     private static final Random RANDOM = new Random(System.nanoTime());
+    private static final int LOCK_TIME_IN_SECONDS = 300;
+    private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported.";

     @Inject private AgentManager _agentMgr;
     @Inject private ConfigurationDao _configDao;
     @Inject private DataStoreManager dataStoreMgr;
     @Inject private DiskOfferingDao _diskOfferingDao;
+    @Inject private GuestOSCategoryDao _guestOsCategoryDao;
+    @Inject private GuestOSDao _guestOsDao;
     @Inject private ClusterDao clusterDao;
     @Inject private HostDao _hostDao;
     @Inject private HostDetailsDao hostDetailsDao;
-    @Inject private ManagementService _mgr;
     @Inject private PrimaryDataStoreDao _storagePoolDao;
     @Inject private SnapshotDao _snapshotDao;
+    @Inject private SnapshotDataStoreDao _snapshotDataStoreDao;
     @Inject private SnapshotDetailsDao _snapshotDetailsDao;
+    @Inject private VMInstanceDao _vmDao;
+    @Inject private VMTemplateDao _vmTemplateDao;
     @Inject private VolumeDao _volumeDao;
     @Inject private VolumeDataFactory _volumeDataFactory;
     @Inject private VolumeDetailsDao volumeDetailsDao;
     @Inject private VolumeService _volumeService;
     @Inject private StorageCacheManager cacheMgr;
     @Inject private EndPointSelector selector;
+
     @Override
     public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
         if (srcData instanceof SnapshotInfo) {
@@ -136,9 +169,40 @@ public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
             return StrategyPriority.HIGHEST;
         }

+        if (srcData instanceof VolumeInfo && destData instanceof VolumeInfo) {
+            VolumeInfo srcVolumeInfo = (VolumeInfo)srcData;
+
+            if (isVolumeOnManagedStorage(srcVolumeInfo)) {
+                return StrategyPriority.HIGHEST;
+            }
+
+            VolumeInfo destVolumeInfo = (VolumeInfo)destData;
+
+            if (isVolumeOnManagedStorage(destVolumeInfo)) {
+                return StrategyPriority.HIGHEST;
+            }
+        }
+
+        if (srcData instanceof VolumeInfo && destData instanceof TemplateInfo) {
+            VolumeInfo srcVolumeInfo = (VolumeInfo)srcData;
+
+            if (isVolumeOnManagedStorage(srcVolumeInfo)) {
+                return StrategyPriority.HIGHEST;
+            }
+        }
+
         return StrategyPriority.CANT_HANDLE;
     }

+    private boolean isVolumeOnManagedStorage(VolumeInfo volumeInfo) {
+        long storagePooldId = volumeInfo.getDataStore().getId();
+        StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePooldId);
+
+        return storagePoolVO.isManaged();
+    }
+
+    // canHandle returns true if the storage driver for the DataObject that's passed in can support certain features (what features we
+    // care about during a particular invocation of this method depend on what type of DataObject was passed in (ex. VolumeInfo versus SnapshotInfo)).
     private boolean canHandle(DataObject dataObject) {
         Preconditions.checkArgument(dataObject != null, "Passing 'null' to dataObject of canHandle(DataObject) is not supported.");
@@ -151,7 +215,7 @@ private boolean canHandle(DataObject dataObject) {
             return false;
         }

-        if (dataObject instanceof VolumeInfo || dataObject instanceof SnapshotInfo) {
+        if (dataObject instanceof VolumeInfo || dataObject instanceof SnapshotInfo) {
             String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
             Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value);
@@ -170,7 +234,6 @@ private boolean canHandle(DataObject dataObject) {
                 return true;
             }
-        }
     }
@@ -179,123 +242,320 @@ private boolean canHandle(DataObject dataObject) {

     @Override
     public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+        if (HypervisorType.KVM.equals(srcHost.getHypervisorType())) {
+            Set<VolumeInfo> volumeInfoSet = volumeMap.keySet();
+
+            for (VolumeInfo volumeInfo : volumeInfoSet) {
+                StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
+
+                if (storagePoolVO.isManaged()) {
+                    return StrategyPriority.HIGHEST;
+                }
+            }
+
+            Collection<DataStore> dataStores = volumeMap.values();
+
+            for (DataStore dataStore : dataStores) {
+                StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStore.getId());
+
+                if (storagePoolVO.isManaged()) {
+                    return StrategyPriority.HIGHEST;
+                }
+            }
+        }
+
         return StrategyPriority.CANT_HANDLE;
     }

     @Override
     public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
         if (srcData instanceof SnapshotInfo) {
-            SnapshotInfo snapshotInfo = (SnapshotInfo)srcData;
+            SnapshotInfo srcSnapshotInfo = (SnapshotInfo)srcData;

-            validate(snapshotInfo);
+            handleCopyAsyncForSnapshot(srcSnapshotInfo, destData, callback);
+        } else if (srcData instanceof TemplateInfo && destData instanceof VolumeInfo) {
+            TemplateInfo srcTemplateInfo = (TemplateInfo)srcData;
+            VolumeInfo destVolumeInfo = (VolumeInfo)destData;

-            boolean canHandleSrc = canHandle(srcData);
+            handleCopyAsyncForTemplateAndVolume(srcTemplateInfo, destVolumeInfo, callback);
+        } else if (srcData instanceof VolumeInfo && destData instanceof VolumeInfo) {
+            VolumeInfo srcVolumeInfo = (VolumeInfo)srcData;
+            VolumeInfo destVolumeInfo = (VolumeInfo)destData;

-            if (canHandleSrc && (destData instanceof TemplateInfo || destData instanceof SnapshotInfo) &&
-                    (destData.getDataStore().getRole() == DataStoreRole.Image || destData.getDataStore().getRole() == DataStoreRole.ImageCache)) {
-                handleCopyDataToSecondaryStorage(snapshotInfo, destData, callback);
+            handleCopyAsyncForVolumes(srcVolumeInfo, destVolumeInfo, callback);
+        } else if (srcData instanceof VolumeInfo && destData instanceof TemplateInfo &&
+                (destData.getDataStore().getRole() == DataStoreRole.Image || destData.getDataStore().getRole() == DataStoreRole.ImageCache)) {
+            VolumeInfo srcVolumeInfo = (VolumeInfo)srcData;
+            TemplateInfo destTemplateInfo = (TemplateInfo)destData;

-                return;
-            }
+            handleCreateTemplateFromVolume(srcVolumeInfo, destTemplateInfo, callback);
+        }
+        else {
+            handleError(OPERATION_NOT_SUPPORTED, callback);
+        }
+    }

-        if (destData instanceof VolumeInfo) {
-            VolumeInfo volumeInfo = (VolumeInfo)destData;
+    private void handleCopyAsyncForSnapshot(SnapshotInfo srcSnapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
+        verifyFormat(srcSnapshotInfo);

-            boolean canHandleDest = canHandle(destData);
+        boolean canHandleSrc = canHandle(srcSnapshotInfo);

-            if (canHandleSrc && canHandleDest) {
-                if (snapshotInfo.getDataStore().getId() == volumeInfo.getDataStore().getId()) {
-                    handleCreateVolumeFromSnapshotBothOnStorageSystem(snapshotInfo, volumeInfo, callback);
-                    return;
-                }
-                else {
-                    String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
-                            "not supported by source or destination storage plug-in). " + getSrcDestDataStoreMsg(srcData, destData);
+        if (canHandleSrc && (destData instanceof TemplateInfo || destData instanceof SnapshotInfo) &&
+                (destData.getDataStore().getRole() == DataStoreRole.Image || destData.getDataStore().getRole() == DataStoreRole.ImageCache)) {
+            handleCopyDataToSecondaryStorage(srcSnapshotInfo, destData, callback);
+        } else if (destData instanceof VolumeInfo) {
+            handleCopyAsyncForSnapshotToVolume(srcSnapshotInfo, (VolumeInfo)destData, callback);
+        } else {
+            handleError(OPERATION_NOT_SUPPORTED, callback);
+        }
+    }

-                    LOGGER.warn(errMsg);
+    private void handleCopyAsyncForSnapshotToVolume(SnapshotInfo srcSnapshotInfo, VolumeInfo destVolumeInfo,
+                                                    AsyncCompletionCallback<CopyCommandResult> callback) {
+        boolean canHandleDest = canHandle(destVolumeInfo);

-                    throw new UnsupportedOperationException(errMsg);
-                }
-            }
+        if (!canHandleDest) {
+            handleError(OPERATION_NOT_SUPPORTED, callback);
+        }

-            if (canHandleDest) {
-                handleCreateVolumeFromSnapshotOnSecondaryStorage(snapshotInfo, volumeInfo, callback);
+        boolean canHandleSrc = canHandle(srcSnapshotInfo);

-                return;
-            }
+        if (!canHandleSrc) {
+            handleCreateVolumeFromSnapshotOnSecondaryStorage(srcSnapshotInfo, destVolumeInfo, callback);
+        }
+
+        if (srcSnapshotInfo.getDataStore().getId() == destVolumeInfo.getDataStore().getId()) {
+            handleCreateVolumeFromSnapshotBothOnStorageSystem(srcSnapshotInfo, destVolumeInfo, callback);
+        } else {
+            String errMsg = "To perform this operation, the source and destination primary storages must be the same.";

-            if (canHandleSrc) {
-                String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
-                        "not supported by source storage plug-in). " + getSrcDataStoreMsg(srcData);
+            handleError(errMsg, callback);
+        }
+    }

-                LOGGER.warn(errMsg);
+    private void handleCopyAsyncForTemplateAndVolume(TemplateInfo srcTemplateInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
+        boolean canHandleSrc = canHandle(srcTemplateInfo);

-                throw new UnsupportedOperationException(errMsg);
-            }
-        }
-    } else if (srcData instanceof TemplateInfo && destData instanceof VolumeInfo) {
-        boolean canHandleSrc = canHandle(srcData);
+        if (!canHandleSrc) {
+            handleError(OPERATION_NOT_SUPPORTED, callback);
+        }
+
+        handleCreateVolumeFromTemplateBothOnStorageSystem(srcTemplateInfo, destVolumeInfo, callback);
+    }

-        if (!canHandleSrc) {
-            String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_CAN_CREATE_VOLUME_FROM_VOLUME " +
-                    "not supported by destination storage plug-in). " + getDestDataStoreMsg(destData);
" + getDestDataStoreMsg(destData); + private void handleCopyAsyncForVolumes(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { + if (srcVolumeInfo.getState() == Volume.State.Migrating) { + if (isVolumeOnManagedStorage(srcVolumeInfo)) { + if (destVolumeInfo.getDataStore().getRole() == DataStoreRole.Image || destVolumeInfo.getDataStore().getRole() == DataStoreRole.ImageCache) { + handleVolumeCopyFromManagedStorageToSecondaryStorage(srcVolumeInfo, destVolumeInfo, callback); + } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { + handleVolumeMigrationFromManagedStorageToNonManagedStorage(srcVolumeInfo, destVolumeInfo, callback); + } else { + String errMsg = "The source volume to migrate and the destination volume are both on managed storage. " + + "Migration in this case is not yet supported."; - LOGGER.warn(errMsg); + handleError(errMsg, callback); + } + } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { + String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this migration use case."; - throw new UnsupportedOperationException(errMsg); + handleError(errMsg, callback); + } else { + handleVolumeMigrationFromNonManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } + } else if (srcVolumeInfo.getState() == Volume.State.Uploaded && + (srcVolumeInfo.getDataStore().getRole() == DataStoreRole.Image || srcVolumeInfo.getDataStore().getRole() == DataStoreRole.ImageCache) && + destVolumeInfo.getDataStore().getRole() == DataStoreRole.Primary) { + ImageFormat imageFormat = destVolumeInfo.getFormat(); - handleCreateVolumeFromTemplateBothOnStorageSystem((TemplateInfo)srcData, (VolumeInfo)destData, callback); + if (!ImageFormat.QCOW2.equals(imageFormat)) { + String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this upload use case (non KVM)."; + + handleError(errMsg, callback); + } - return; + handleCreateVolumeFromVolumeOnSecondaryStorage(srcVolumeInfo, destVolumeInfo, destVolumeInfo.getDataCenterId(), HypervisorType.KVM, callback); + } else { + handleError(OPERATION_NOT_SUPPORTED, callback); } + } + + private void handleError(String errMsg, AsyncCompletionCallback callback) { + LOGGER.warn(errMsg); + + invokeCallback(errMsg, callback); - throw new UnsupportedOperationException("This operation is not supported."); + throw new UnsupportedOperationException(errMsg); } - private String getSrcDestDataStoreMsg(DataObject srcData, DataObject destData) { - Preconditions.checkArgument(srcData != null, "Passing 'null' to srcData of getSrcDestDataStoreMsg(DataObject, DataObject) is not supported."); - Preconditions.checkArgument(destData != null, "Passing 'null' to destData of getSrcDestDataStoreMsg(DataObject, DataObject) is not supported."); + private void invokeCallback(String errMsg, AsyncCompletionCallback callback) { + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); - return "Source data store = " + srcData.getDataStore().getName() + "; " + "Destination data store = " + destData.getDataStore().getName() + "."; + callback.complete(result); } - private String getSrcDataStoreMsg(DataObject srcData) { - Preconditions.checkArgument(srcData != null, "Passing 'null' to srcData of getSrcDataStoreMsg(DataObject) is not supported."); + private void handleVolumeCopyFromManagedStorageToSecondaryStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + AsyncCompletionCallback callback) { + String errMsg = 
+                null;
+        String volumePath = null;
+
+        try {
+            if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) {
+                throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " +
+                        "from managed storage to non-managed storage.");
+            }
+
+            HypervisorType hypervisorType = HypervisorType.KVM;
+            VirtualMachine vm = srcVolumeInfo.getAttachedVM();
+
+            if (vm != null && vm.getState() != VirtualMachine.State.Stopped) {
+                throw new CloudRuntimeException("Currently, if a volume to copy from managed storage to secondary storage is attached to " +
+                        "a VM, the VM must be in the Stopped state.");
+            }
+
+            long srcStoragePoolId = srcVolumeInfo.getPoolId();
+            StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId);
+
+            HostVO hostVO;
+
+            if (srcStoragePoolVO.getClusterId() != null) {
+                hostVO = getHostInCluster(srcStoragePoolVO.getClusterId());
+            }
+            else {
+                hostVO = getHost(srcVolumeInfo.getDataCenterId(), hypervisorType, false);
+            }
+
+            volumePath = copyVolumeToSecondaryStorage(srcVolumeInfo, destVolumeInfo, hostVO,
+                    "Unable to copy the volume from managed storage to secondary storage");
+        }
+        catch (Exception ex) {
+            errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeCopyFromManagedStorageToSecondaryStorage': " +
+                    ex.getMessage();
+
+            throw new CloudRuntimeException(errMsg);
+        }
+        finally {
+            CopyCmdAnswer copyCmdAnswer;
+
+            if (errMsg != null) {
+                copyCmdAnswer = new CopyCmdAnswer(errMsg);
+            }
+            else if (volumePath == null) {
+                copyCmdAnswer = new CopyCmdAnswer("Unable to acquire a volume path");
+            }
+            else {
+                VolumeObjectTO volumeObjectTO = (VolumeObjectTO)destVolumeInfo.getTO();
+
+                volumeObjectTO.setPath(volumePath);
+
+                copyCmdAnswer = new CopyCmdAnswer(volumeObjectTO);
+            }
+
+            CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
+
+            result.setResult(errMsg);

-        return "Source data store = " + srcData.getDataStore().getName() + ".";
+            callback.complete(result);
+        }
     }

-    private String getDestDataStoreMsg(DataObject destData) {
-        Preconditions.checkArgument(destData != null, "Passing 'null' to destData of getDestDataStoreMsg(DataObject) is not supported.");
+    private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
+                                                                            AsyncCompletionCallback<CopyCommandResult> callback) {
+        String errMsg = null;
+
+        try {
+            if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) {
+                throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " +
+                        "from managed storage to non-managed storage.");
+            }
+
+            HypervisorType hypervisorType = HypervisorType.KVM;
+            VirtualMachine vm = srcVolumeInfo.getAttachedVM();
+
+            if (vm != null && vm.getState() != VirtualMachine.State.Stopped) {
+                throw new CloudRuntimeException("Currently, if a volume to migrate from managed storage to non-managed storage is attached to " +
+                        "a VM, the VM must be in the Stopped state.");
+            }
+
+            long destStoragePoolId = destVolumeInfo.getPoolId();
+            StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(destStoragePoolId);
+
+            HostVO hostVO;
+
+            if (destStoragePoolVO.getClusterId() != null) {
+                hostVO = getHostInCluster(destStoragePoolVO.getClusterId());
+            }
+            else {
+                hostVO = getHost(destVolumeInfo.getDataCenterId(), hypervisorType, false);
+            }
+
+            setCertainVolumeValuesNull(destVolumeInfo.getId());
+
+            // migrate the volume via the hypervisor
+            String path = migrateVolume(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from managed storage to non-managed storage");
+
+            updateVolumePath(destVolumeInfo.getId(), path);
+        }
+        catch (Exception ex) {
+            errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromManagedStorageToNonManagedStorage': " +
+                    ex.getMessage();
+
+            throw new CloudRuntimeException(errMsg);
+        }
+        finally {
+            CopyCmdAnswer copyCmdAnswer;
+
+            if (errMsg != null) {
+                copyCmdAnswer = new CopyCmdAnswer(errMsg);
+            }
+            else {
+                destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
+
+                DataTO dataTO = destVolumeInfo.getTO();
+
+                copyCmdAnswer = new CopyCmdAnswer(dataTO);
+            }
+
+            CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
+
+            result.setResult(errMsg);
+
+            callback.complete(result);
+        }
+    }

-        return "Destination data store = " + destData.getDataStore().getName() + ".";
+    private void verifyFormat(ImageFormat imageFormat) {
+        if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2) {
+            throw new CloudRuntimeException("Only the following image types are currently supported: " +
+                    ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", and " + ImageFormat.QCOW2);
+        }
     }

-    private void validate(SnapshotInfo snapshotInfo) {
+    private void verifyFormat(SnapshotInfo snapshotInfo) {
         long volumeId = snapshotInfo.getVolumeId();
         VolumeVO volumeVO = _volumeDao.findByIdIncludingRemoved(volumeId);

-        if (volumeVO.getFormat() != ImageFormat.VHD) {
-            throw new CloudRuntimeException("Only the " + ImageFormat.VHD.toString() + " image type is currently supported.");
-        }
+        verifyFormat(volumeVO.getFormat());
     }

     private boolean usingBackendSnapshotFor(SnapshotInfo snapshotInfo) {
-        String property = getProperty(snapshotInfo.getId(), "takeSnapshot");
+        String property = getSnapshotProperty(snapshotInfo.getId(), "takeSnapshot");

         return Boolean.parseBoolean(property);
     }

-    protected boolean needCacheStorage(DataObject srcData, DataObject destData) {
+    private boolean needCacheStorage(DataObject srcData, DataObject destData) {
         DataTO srcTO = srcData.getTO();
         DataStoreTO srcStoreTO = srcTO.getDataStore();
         DataTO destTO = destData.getTO();
         DataStoreTO destStoreTO = destTO.getDataStore();

-        // both snapshot and volume are on primary datastore.
No need for a cache storage as - // hypervisor will copy directly + // both snapshot and volume are on primary datastore - no need for a cache storage as hypervisor will copy directly if (srcStoreTO instanceof PrimaryDataStoreTO && destStoreTO instanceof PrimaryDataStoreTO) { return false; } @@ -304,14 +564,15 @@ protected boolean needCacheStorage(DataObject srcData, DataObject destData) { return false; } - if (destStoreTO instanceof NfsTO || destStoreTO.getRole() == DataStoreRole.ImageCache) { return false; } + if (LOGGER.isDebugEnabled()) { - LOGGER.debug("needCacheStorage true, dest at " + destTO.getPath() + " dest role " + destStoreTO.getRole().toString() + srcTO.getPath() + " src role " + - srcStoreTO.getRole().toString()); + LOGGER.debug("needCacheStorage true; dest at " + destTO.getPath() + ", dest role " + destStoreTO.getRole().toString() + "; src at " + + srcTO.getPath() + ", src role " + srcStoreTO.getRole().toString()); } + return true; } @@ -320,6 +581,7 @@ private Scope pickCacheScopeForCopy(DataObject srcData, DataObject destData) { Scope destScope = destData.getDataStore().getScope(); Scope selectedScope = null; + if (srcScope.getScopeId() != null) { selectedScope = getZoneScope(srcScope); } else if (destScope.getScopeId() != null) { @@ -327,86 +589,234 @@ private Scope pickCacheScopeForCopy(DataObject srcData, DataObject destData) { } else { LOGGER.warn("Cannot find a zone-wide scope for movement that needs a cache storage"); } + return selectedScope; } private Scope getZoneScope(Scope scope) { ZoneScope zoneScope; + if (scope instanceof ClusterScope) { ClusterScope clusterScope = (ClusterScope)scope; + zoneScope = new ZoneScope(clusterScope.getZoneId()); } else if (scope instanceof HostScope) { HostScope hostScope = (HostScope)scope; + zoneScope = new ZoneScope(hostScope.getZoneId()); } else { zoneScope = (ZoneScope)scope; } + return zoneScope; } + private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + AsyncCompletionCallback callback) { + String errMsg = null; + + try { + HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType(); + + if (!HypervisorType.KVM.equals(hypervisorType)) { + throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + + "from non-managed storage to managed storage."); + } + + VirtualMachine vm = srcVolumeInfo.getAttachedVM(); + + if (vm != null && vm.getState() != VirtualMachine.State.Stopped) { + throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage is attached to " + + "a VM, the VM must be in the Stopped state."); + } + + destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + + volumeVO.setPath(volumeVO.get_iScsiName()); + + _volumeDao.update(volumeVO.getId(), volumeVO); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + + long srcStoragePoolId = srcVolumeInfo.getPoolId(); + StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId); + + HostVO hostVO; + + if (srcStoragePoolVO.getClusterId() != null) { + hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + } + else { + hostVO = getHost(destVolumeInfo.getDataCenterId(), hypervisorType, false); + } + + // migrate the volume via the hypervisor + migrateVolume(srcVolumeInfo, destVolumeInfo, hostVO, 
"Unable to migrate the volume from non-managed storage to managed storage"); + + volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + + volumeVO.setFormat(ImageFormat.QCOW2); + + _volumeDao.update(volumeVO.getId(), volumeVO); + } + catch (Exception ex) { + errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + + ex.getMessage(); + + throw new CloudRuntimeException(errMsg); + } + finally { + CopyCmdAnswer copyCmdAnswer; + + if (errMsg != null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + else { + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + + DataTO dataTO = destVolumeInfo.getTO(); + + copyCmdAnswer = new CopyCmdAnswer(dataTO); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); + + callback.complete(result); + } + } + /** - * This function is responsible for copying a volume from the managed store to a secondary store. This is used in two cases + * This function is responsible for copying a snapshot from managed storage to secondary storage. This is used in the following two cases: * 1) When creating a template from a snapshot * 2) When createSnapshot is called with location=SECONDARY * - * @param snapshotInfo Source snapshot + * @param snapshotInfo source snapshot * @param destData destination (can be template or snapshot) * @param callback callback for async */ private void handleCopyDataToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + String errMsg = null; + CopyCmdAnswer copyCmdAnswer = null; + boolean usingBackendSnapshot = false; + try { snapshotInfo.processEvent(Event.CopyingRequested); - } - catch (Exception ex) { - throw new CloudRuntimeException("This snapshot is not currently in a state where it can be used to create a template."); - } - HostVO hostVO = getHost(snapshotInfo); + HostVO hostVO = getHost(snapshotInfo); - boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo); - boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); - boolean needCache = needCacheStorage(snapshotInfo, destData); + boolean needCache = needCacheStorage(snapshotInfo, destData); - DataObject destOnStore = destData; + DataObject destOnStore = destData; - if (needCache) { - // creates an object in the DB for data to be cached - Scope selectedScope = pickCacheScopeForCopy(snapshotInfo, destData); - destOnStore = cacheMgr.getCacheObject(snapshotInfo, selectedScope); - destOnStore.processEvent(Event.CreateOnlyRequested); - } + if (needCache) { + // creates an object in the DB for data to be cached + Scope selectedScope = pickCacheScopeForCopy(snapshotInfo, destData); - if (usingBackendSnapshot && !computeClusterSupportsResign) { - String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); + destOnStore = cacheMgr.getCacheObject(snapshotInfo, selectedScope); - LOGGER.warn(noSupportForResignErrMsg); + destOnStore.processEvent(Event.CreateOnlyRequested); + } - throw new CloudRuntimeException(noSupportForResignErrMsg); - } + usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo); - try { if (usingBackendSnapshot) { - createVolumeFromSnapshot(hostVO, snapshotInfo, true); + final boolean computeClusterSupportsVolumeClone; + + // only XenServer, VMware, and KVM are currently supported + if 
(HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) { + computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId()); + } + else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || HypervisorType.KVM.equals(snapshotInfo.getHypervisorType())) { + computeClusterSupportsVolumeClone = true; + } + else { + throw new CloudRuntimeException("Unsupported hypervisor type"); + } + + if (!computeClusterSupportsVolumeClone) { + String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + + hostVO.getClusterId(); + + LOGGER.warn(noSupportForResignErrMsg); + + throw new CloudRuntimeException(noSupportForResignErrMsg); + } } + String vmdk = null; + String uuid = null; + boolean keepGrantedAccess = false; + DataStore srcDataStore = snapshotInfo.getDataStore(); + if (usingBackendSnapshot) { + createVolumeFromSnapshot(snapshotInfo); + + if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType()) || HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + keepGrantedAccess = HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType()); + + Map extraDetails = null; + + if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + extraDetails = new HashMap<>(); + + String extraDetailsVmdk = getSnapshotProperty(snapshotInfo.getId(), DiskTO.VMDK); + + extraDetails.put(DiskTO.VMDK, extraDetailsVmdk); + extraDetails.put(DiskTO.TEMPLATE_RESIGN, Boolean.TRUE.toString()); + } + + copyCmdAnswer = performResignature(snapshotInfo, hostVO, extraDetails, keepGrantedAccess); + + // If using VMware, have the host rescan its software HBA if dynamic discovery is in use. + if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + String iqn = getSnapshotProperty(snapshotInfo.getId(), DiskTO.IQN); + + disconnectHostFromVolume(hostVO, srcDataStore.getId(), iqn); + } + + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); + } else { + throw new CloudRuntimeException("Unable to create volume from snapshot"); + } + } + + vmdk = copyCmdAnswer.getNewData().getPath(); + uuid = UUID.randomUUID().toString(); + } + } + String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); - CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - - String errMsg = null; - CopyCmdAnswer copyCmdAnswer = null; + CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); try { - // If we are using a back-end snapshot, then we should still have access to it from the hosts in the cluster that hostVO is in - // (because we passed in true as the third parameter to createVolumeFromSnapshot above). 
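// Illustrative sketch, not part of the original diff: the per-hypervisor
// "supports volume clone?" decision that the reworked block above encodes.
// XenServer requires per-cluster resignature support (clusterDao.getSupportsResigning
// in the real code); VMware and KVM always qualify. All names here are hypothetical.
import java.util.function.LongPredicate;

final class VolumeCloneSupportSketch {
    static boolean computeClusterSupportsVolumeClone(String hypervisorType, long clusterId,
            LongPredicate clusterSupportsResigning) {
        switch (hypervisorType) {
            case "XenServer":
                // resignature capability is tracked per compute cluster
                return clusterSupportsResigning.test(clusterId);
            case "VMware":
            case "KVM":
                return true;
            default:
                throw new IllegalArgumentException("Unsupported hypervisor type: " + hypervisorType);
        }
    }

    public static void main(String[] args) {
        LongPredicate resigning = id -> id == 1L; // pretend only cluster 1 supports resigning
        System.out.println(computeClusterSupportsVolumeClone("XenServer", 1L, resigning)); // true
        System.out.println(computeClusterSupportsVolumeClone("KVM", 2L, resigning));       // true
    }
}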
- if (!usingBackendSnapshot) { + if (!keepGrantedAccess) { _volumeService.grantAccess(snapshotInfo, hostVO, srcDataStore); } Map srcDetails = getSnapshotDetails(snapshotInfo); + if (isForVMware(destData)) { + srcDetails.put(DiskTO.VMDK, vmdk); + srcDetails.put(DiskTO.UUID, uuid); + + if (destData instanceof TemplateInfo) { + VMTemplateVO templateDataStoreVO = _vmTemplateDao.findById(destData.getId()); + + templateDataStoreVO.setUniqueName(uuid); + + _vmTemplateDao.update(destData.getId(), templateDataStoreVO); + } + } + copyCommand.setOptions(srcDetails); copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); @@ -423,15 +833,16 @@ private void handleCopyDataToSecondaryStorage(SnapshotInfo snapshotInfo, DataObj // storage), at this point, the data has been copied from the primary // to the NFS cache by the hypervisor. We now invoke another copy // command to copy this data from cache to secondary storage. We - // then cleanup the cache + // then clean up the cache. destOnStore.processEvent(Event.OperationSuccessed, copyCmdAnswer); - CopyCommand cmd = new CopyCommand(destOnStore.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(destOnStore.getTO(), destData.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); EndPoint ep = selector.select(destOnStore, destData); if (ep == null) { - errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + errMsg = "No remote endpoint to send command, check if host or SSVM is down"; LOGGER.error(errMsg); @@ -443,16 +854,22 @@ private void handleCopyDataToSecondaryStorage(SnapshotInfo snapshotInfo, DataObj // clean up snapshot copied to staging cacheMgr.deleteCacheObject(destOnStore); } - } catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; LOGGER.warn(msg, ex); - throw new CloudRuntimeException(msg + ex.getMessage()); + throw new CloudRuntimeException(msg + ex.getMessage(), ex); } finally { _volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore); + // If using VMware, have the host rescan its software HBA if dynamic discovery is in use. + if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + String iqn = getSnapshotProperty(snapshotInfo.getId(), DiskTO.IQN); + + disconnectHostFromVolume(hostVO, srcDataStore.getId(), iqn); + } + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { errMsg = copyCmdAnswer.getDetails(); @@ -478,111 +895,156 @@ private void handleCopyDataToSecondaryStorage(SnapshotInfo snapshotInfo, DataObj LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex); } } + } + catch (Exception ex) { + errMsg = ex.getMessage(); - CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); - - result.setResult(errMsg); - - callback.complete(result); + throw new CloudRuntimeException(errMsg); } finally { if (usingBackendSnapshot) { deleteVolumeFromSnapshot(snapshotInfo); } - } + + if (copyCmdAnswer == null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); + + callback.complete(result); + } } /** * Creates a volume on the storage from a snapshot that resides on the secondary storage (archived snapshot). 
* @param snapshotInfo snapshot on secondary * @param volumeInfo volume to be created on the storage - * @param callback for async + * @param callback for async */ - private void handleCreateVolumeFromSnapshotOnSecondaryStorage(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback callback) { - // at this point, the snapshotInfo and volumeInfo should have the same disk offering ID (so either one should be OK to get a DiskOfferingVO instance) - DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volumeInfo.getDiskOfferingId()); - SnapshotVO snapshot = _snapshotDao.findById(snapshotInfo.getId()); + private void handleCreateVolumeFromSnapshotOnSecondaryStorage(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, + AsyncCompletionCallback callback) { + String errMsg = null; + CopyCmdAnswer copyCmdAnswer = null; - // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage) - _volumeService.updateHypervisorSnapshotReserveForVolume(diskOffering, volumeInfo.getId(), snapshot.getHypervisorType()); + try { + // at this point, the snapshotInfo and volumeInfo should have the same disk offering ID (so either one should be OK to get a DiskOfferingVO instance) + DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volumeInfo.getDiskOfferingId()); + SnapshotVO snapshot = _snapshotDao.findById(snapshotInfo.getId()); - CopyCmdAnswer copyCmdAnswer = null; - String errMsg = null; + // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage) + _volumeService.updateHypervisorSnapshotReserveForVolume(diskOffering, volumeInfo.getId(), snapshot.getHypervisorType()); - HostVO hostVO = null; + HostVO hostVO; - try { // create a volume on the storage AsyncCallFuture future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore()); VolumeApiResult result = future.get(); if (result.isFailed()) { LOGGER.error("Failed to create a volume: " + result.getResult()); + throw new CloudRuntimeException(result.getResult()); } volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); - volumeInfo.processEvent(Event.MigrationRequested); - volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); - hostVO = getHost(snapshotInfo.getDataCenterId(), false); + hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false); // copy the volume from secondary via the hypervisor - copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO); + if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) { + copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO); + } + else { + copyCmdAnswer = copyImageToVolume(snapshotInfo, volumeInfo, hostVO); + } if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { - errMsg = copyCmdAnswer.getDetails(); + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); } else { - errMsg = "Unable to create volume from snapshot"; + throw new CloudRuntimeException("Unable to create volume from snapshot"); } } } catch (Exception ex) { - errMsg = ex.getMessage() != null ? 
ex.getMessage() : "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem'"; + errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotOnSecondaryStorage': " + + ex.getMessage(); + + throw new CloudRuntimeException(errMsg); } + finally { + if (copyCmdAnswer == null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } - CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); - result.setResult(errMsg); + result.setResult(errMsg); - callback.complete(result); + callback.complete(result); + } } /** * Clones a template present on the storage to a new volume and resignatures it. * - * @param templateInfo source template - * @param volumeInfo destination ROOT volume - * @param callback for async + * @param templateInfo source template + * @param volumeInfo destination ROOT volume + * @param callback for async */ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo templateInfo, VolumeInfo volumeInfo, AsyncCompletionCallback callback) { - Preconditions.checkArgument(templateInfo != null, "Passing 'null' to templateInfo of handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); - Preconditions.checkArgument(volumeInfo != null, "Passing 'null' to volumeInfo of handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); - - CopyCmdAnswer copyCmdAnswer = null; String errMsg = null; + CopyCmdAnswer copyCmdAnswer = null; - HostVO hostVO = getHost(volumeInfo.getDataCenterId(), true); + try { + Preconditions.checkArgument(templateInfo != null, "Passing 'null' to templateInfo of " + + "handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); + Preconditions.checkArgument(volumeInfo != null, "Passing 'null' to volumeInfo of " + + "handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); - if (hostVO == null) { - throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + volumeInfo.getDataCenterId()); - } + verifyFormat(templateInfo.getFormat()); - boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); + HostVO hostVO = null; - if (!computeClusterSupportsResign) { - String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); + final boolean computeClusterSupportsVolumeClone; - LOGGER.warn(noSupportForResignErrMsg); + // only XenServer, VMware, and KVM are currently supported + // Leave host equal to null for KVM since we don't need to perform a resignature when using that hypervisor type. 
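// Illustrative sketch, not part of the original diff: the format-driven host selection
// performed just below. VHD implies XenServer (and the chosen host's cluster must support
// resigning), OVA implies VMware (all VMware hosts can resignature), and QCOW2 implies
// KVM, for which no resignature is needed, so the host reference stays null. Hypothetical names.
import java.util.Optional;

final class ResignatureHostPickerSketch {
    enum Format { VHD, OVA, QCOW2 }

    /** Hypervisor whose host must perform the resignature; empty means KVM (no resignature). */
    static Optional<String> hypervisorForResignature(Format format) {
        switch (format) {
            case VHD:   return Optional.of("XenServer");
            case OVA:   return Optional.of("VMware");
            case QCOW2: return Optional.empty();
            default:    throw new IllegalArgumentException("Unexpected format: " + format);
        }
    }

    public static void main(String[] args) {
        System.out.println(hypervisorForResignature(Format.VHD));   // Optional[XenServer]
        System.out.println(hypervisorForResignature(Format.QCOW2)); // Optional.empty
    }
}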
+ if (volumeInfo.getFormat() == ImageFormat.VHD) { + hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.XenServer, true); - throw new CloudRuntimeException(noSupportForResignErrMsg); - } + if (hostVO == null) { + throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + + volumeInfo.getDataCenterId()); + } + + computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId()); + + if (!computeClusterSupportsVolumeClone) { + String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + + hostVO.getClusterId(); + + LOGGER.warn(noSupportForResignErrMsg); + + throw new CloudRuntimeException(noSupportForResignErrMsg); + } + } + else if (volumeInfo.getFormat() == ImageFormat.OVA) { + // all VMware hosts support resigning + hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.VMware, false); + + if (hostVO == null) { + throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + + volumeInfo.getDataCenterId()); + } + } - try { VolumeDetailVO volumeDetail = new VolumeDetailVO(volumeInfo.getId(), "cloneOfTemplate", String.valueOf(templateInfo.getId()), @@ -591,6 +1053,7 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp volumeDetail = volumeDetailsDao.persist(volumeDetail); AsyncCallFuture future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore()); + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(_configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); VolumeApiResult result = future.get(storagePoolMaxWaitSeconds, TimeUnit.SECONDS); @@ -601,6 +1064,7 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp if (result.isFailed()) { LOGGER.warn("Failed to create a volume: " + result.getResult()); + throw new CloudRuntimeException(result.getResult()); } @@ -608,48 +1072,100 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp volumeInfo.processEvent(Event.MigrationRequested); volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); - copyCmdAnswer = performResignature(volumeInfo, hostVO); + if (hostVO != null) { + Map extraDetails = null; - if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { - if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { - throw new CloudRuntimeException(copyCmdAnswer.getDetails()); - } else { - throw new CloudRuntimeException("Unable to create a volume from a template"); + if (HypervisorType.VMware.equals(templateInfo.getHypervisorType())) { + extraDetails = new HashMap<>(); + + String extraDetailsVmdk = templateInfo.getUniqueName() + ".vmdk"; + + extraDetails.put(DiskTO.VMDK, extraDetailsVmdk); + extraDetails.put(DiskTO.EXPAND_DATASTORE, Boolean.TRUE.toString()); } + + copyCmdAnswer = performResignature(volumeInfo, hostVO, extraDetails); + + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); + } else { + throw new CloudRuntimeException("Unable to create a volume from a template"); + } + } + + // If using VMware, have the host rescan its software HBA if dynamic discovery is in use. 
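// Illustrative sketch, not part of the original diff: the grant -> resignature ->
// disconnect/revoke ordering the VMware branch above depends on. In the real code,
// disconnectHostFromVolume() sends a ModifyTargetsCommand so the host drops the
// dynamically discovered iSCSI target and rescans its software HBA; the interfaces
// below are hypothetical stand-ins.
import java.util.Map;

final class ResignatureLifecycleSketch {
    interface VolumeAccess { void grant(long volumeId, long hostId); void revoke(long volumeId, long hostId); }
    interface ResignatureAgent { boolean resignature(long hostId, Map<String, String> details); }
    interface TargetManager { void disconnect(long hostId, long poolId, String iqn); }

    static boolean resignAndCleanUp(VolumeAccess access, ResignatureAgent agent, TargetManager targets,
            long volumeId, long hostId, long poolId, String iqn, Map<String, String> details) {
        access.grant(volumeId, hostId);
        try {
            return agent.resignature(hostId, details);
        } finally {
            targets.disconnect(hostId, poolId, iqn); // make the host forget the dynamic target
            access.revoke(volumeId, hostId);
        }
    }
}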
+ if (HypervisorType.VMware.equals(templateInfo.getHypervisorType())) { + disconnectHostFromVolume(hostVO, volumeInfo.getPoolId(), volumeInfo.get_iScsiName()); + } + } + else { + VolumeObjectTO newVolume = new VolumeObjectTO(); + + newVolume.setSize(volumeInfo.getSize()); + newVolume.setPath(volumeInfo.getPath()); + newVolume.setFormat(volumeInfo.getFormat()); + + copyCmdAnswer = new CopyCmdAnswer(newVolume); + } + } catch (Exception ex) { + try { + volumeInfo.getDataStore().getDriver().deleteAsync(volumeInfo.getDataStore(), volumeInfo, null); + } + catch (Exception exc) { + LOGGER.warn("Failed to delete volume", exc); + } + + if (templateInfo != null) { + errMsg = "Create volume from template (ID = " + templateInfo.getId() + ") failed: " + ex.getMessage(); + } + else { + errMsg = "Create volume from template failed: " + ex.getMessage(); } - } catch (InterruptedException | ExecutionException | TimeoutException ex ) { - volumeInfo.getDataStore().getDriver().deleteAsync(volumeInfo.getDataStore(), volumeInfo, null); - throw new CloudRuntimeException("Create volume from template (ID = " + templateInfo.getId() + ") failed " + ex.getMessage()); + throw new CloudRuntimeException(errMsg); } + finally { + if (copyCmdAnswer == null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } - CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); - result.setResult(errMsg); + result.setResult(errMsg); - callback.complete(result); + callback.complete(result); + } } - private void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback callback) { - CopyCmdAnswer copyCmdAnswer = null; + private void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, + AsyncCompletionCallback callback) { String errMsg = null; + CopyCmdAnswer copyCmdAnswer = null; try { + verifyFormat(snapshotInfo); + HostVO hostVO = getHost(snapshotInfo); boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo); - boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); + boolean computeClusterSupportsVolumeClone = true; + + if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) { + computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId()); - if (usingBackendSnapshot && !computeClusterSupportsResign) { - String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); + if (usingBackendSnapshot && !computeClusterSupportsVolumeClone) { + String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + + hostVO.getClusterId(); - LOGGER.warn(noSupportForResignErrMsg); + LOGGER.warn(noSupportForResignErrMsg); - throw new CloudRuntimeException(noSupportForResignErrMsg); + throw new CloudRuntimeException(noSupportForResignErrMsg); + } } boolean canStorageSystemCreateVolumeFromVolume = canStorageSystemCreateVolumeFromVolume(snapshotInfo); - boolean useCloning = usingBackendSnapshot || (canStorageSystemCreateVolumeFromVolume && computeClusterSupportsResign); + boolean useCloning = usingBackendSnapshot || (canStorageSystemCreateVolumeFromVolume && computeClusterSupportsVolumeClone); VolumeDetailVO volumeDetail = null; @@ -670,7 +1186,6 @@ private void 
handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snap _volumeService.updateHypervisorSnapshotReserveForVolume(diskOffering, volumeInfo.getId(), snapshot.getHypervisorType()); AsyncCallFuture future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore()); - VolumeApiResult result = future.get(); if (volumeDetail != null) { @@ -680,99 +1195,523 @@ private void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snap if (result.isFailed()) { LOGGER.warn("Failed to create a volume: " + result.getResult()); - throw new CloudRuntimeException(result.getResult()); - } + throw new CloudRuntimeException(result.getResult()); + } + + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + volumeInfo.processEvent(Event.MigrationRequested); + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + + if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType()) || HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + if (useCloning) { + Map extraDetails = null; + + if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + extraDetails = new HashMap<>(); + + String extraDetailsVmdk = getSnapshotProperty(snapshotInfo.getId(), DiskTO.VMDK); + + extraDetails.put(DiskTO.VMDK, extraDetailsVmdk); + } + + copyCmdAnswer = performResignature(volumeInfo, hostVO, extraDetails); + + // If using VMware, have the host rescan its software HBA if dynamic discovery is in use. + if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType())) { + disconnectHostFromVolume(hostVO, volumeInfo.getPoolId(), volumeInfo.get_iScsiName()); + } + } else { + // asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning + // even when we don't need those hosts to do this kind of copy work + hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false); + + copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO); + } + + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); + } else { + throw new CloudRuntimeException("Unable to create volume from snapshot"); + } + } + } + else if (HypervisorType.KVM.equals(snapshotInfo.getHypervisorType())) { + VolumeObjectTO newVolume = new VolumeObjectTO(); + + newVolume.setSize(volumeInfo.getSize()); + newVolume.setPath(volumeInfo.get_iScsiName()); + newVolume.setFormat(volumeInfo.getFormat()); + + copyCmdAnswer = new CopyCmdAnswer(newVolume); + } + else { + throw new CloudRuntimeException("Unsupported hypervisor type"); + } + } + catch (Exception ex) { + errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem': " + + ex.getMessage(); + + throw new CloudRuntimeException(errMsg); + } + finally { + if (copyCmdAnswer == null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); + + callback.complete(result); + } + } + + private void handleCreateVolumeFromVolumeOnSecondaryStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + long dataCenterId, HypervisorType hypervisorType, + AsyncCompletionCallback callback) { + String errMsg = null; + CopyCmdAnswer copyCmdAnswer = null; + + try { + // create a volume on the storage + 
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + + HostVO hostVO = getHost(dataCenterId, hypervisorType, false); + + // copy the volume from secondary via the hypervisor + copyCmdAnswer = copyImageToVolume(srcVolumeInfo, destVolumeInfo, hostVO); + + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); + } + else { + throw new CloudRuntimeException("Unable to create volume from volume"); + } + } + } + catch (Exception ex) { + errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromVolumeOnSecondaryStorage': " + + ex.getMessage(); + + throw new CloudRuntimeException(errMsg); + } + finally { + if (copyCmdAnswer == null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); + + callback.complete(result); + } + } + + private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) { + String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); + int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); + + CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); + + CopyCmdAnswer copyCmdAnswer; + + try { + _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + + Map destDetails = getVolumeDetails(destVolumeInfo); + + copyCommand.setOptions2(destDetails); + + copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); + } + catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { + String msg = "Failed to copy image : "; + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + finally { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } + + VolumeObjectTO volumeObjectTO = (VolumeObjectTO)copyCmdAnswer.getNewData(); + + volumeObjectTO.setFormat(ImageFormat.QCOW2); + + return copyCmdAnswer; + } + + /** + * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to + * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. + * + * The resultant volume must be writable because we need to resign the SR and the VDI that should be inside of it before we copy + * the VHD file to secondary storage. + * + * If the storage system is using writable snapshots, then nothing need be done by that storage system here because we can just + * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. 
+ */ + private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } + } + + /** + * If the underlying storage system needed to create a volume from a snapshot for createVolumeFromSnapshot(SnapshotInfo), then + * this is its opportunity to delete that temporary volume and restore properties in snapshot_details to the way they were before the + * invocation of createVolumeFromSnapshot(SnapshotInfo). + */ + private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); + + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } + } + + private SnapshotDetailsVO handleSnapshotDetails(long csSnapshotId, String value) { + String name = "tempVolume"; + + _snapshotDetailsDao.removeDetail(csSnapshotId, name); + + SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(csSnapshotId, name, value, false); + + return _snapshotDetailsDao.persist(snapshotDetails); + } + + /** + * For each disk to migrate: + * Create a volume on the target storage system. + * Make the newly created volume accessible to the target KVM host. + * Send a command to the target KVM host to connect to the newly created volume. + * Send a command to the source KVM host to migrate the VM and its storage. + */ + @Override + public void copyAsync(Map volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost, Host destHost, AsyncCompletionCallback callback) { + String errMsg = null; + + try { + if (srcHost.getHypervisorType() != HypervisorType.KVM) { + throw new CloudRuntimeException("Invalid hypervisor type (only KVM supported for this operation at the time being)"); + } + + verifyLiveMigrationMapForKVM(volumeDataStoreMap); + + Map migrateStorage = new HashMap<>(); + Map srcVolumeInfoToDestVolumeInfo = new HashMap<>(); + + for (Map.Entry entry : volumeDataStoreMap.entrySet()) { + VolumeInfo srcVolumeInfo = entry.getKey(); + DataStore destDataStore = entry.getValue(); + + VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId()); + StoragePoolVO destStoragePool = _storagePoolDao.findById(destDataStore.getId()); + + VolumeVO destVolume = duplicateVolumeOnAnotherStorage(srcVolume, destStoragePool); + VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore); + + // move the volume from Allocated to Creating + destVolumeInfo.processEvent(Event.MigrationCopyRequested); + // move the volume from Creating to Ready + destVolumeInfo.processEvent(Event.MigrationCopySucceeded); + // move the volume from Ready to Migrating + destVolumeInfo.processEvent(Event.MigrationRequested); + + // create a volume on the destination storage + destDataStore.getDriver().createAsync(destDataStore, destVolumeInfo, null); + + destVolume = _volumeDao.findById(destVolume.getId()); + + destVolume.setPath(destVolume.get_iScsiName()); + + _volumeDao.update(destVolume.getId(), destVolume); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore); + + _volumeService.grantAccess(destVolumeInfo, destHost, destDataStore); + + String connectedPath = connectHostToVolume(destHost, 
destVolumeInfo.getPoolId(), destVolumeInfo.get_iScsiName()); + + MigrateCommand.MigrateDiskInfo migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(), + MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, + MigrateCommand.MigrateDiskInfo.DriverType.RAW, + MigrateCommand.MigrateDiskInfo.Source.DEV, + connectedPath); + + migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo); + + srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo); + } + + PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + + try { + Answer pfma = _agentMgr.send(destHost.getId(), pfmc); + + if (pfma == null || !pfma.getResult()) { + String details = pfma != null ? pfma.getDetails() : "null answer returned"; + String msg = "Unable to prepare for migration due to the following: " + details; + + throw new AgentUnavailableException(msg, destHost.getId()); + } + } + catch (final OperationTimedoutException e) { + throw new AgentUnavailableException("Operation timed out", destHost.getId()); + } + + VMInstanceVO vm = _vmDao.findById(vmTO.getId()); + boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); + + MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(), destHost.getPrivateIpAddress(), isWindows, vmTO, true); + + migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value()); + + migrateCommand.setMigrateStorage(migrateStorage); + + String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString()); + boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence); + + migrateCommand.setAutoConvergence(kvmAutoConvergence); + + MigrateAnswer migrateAnswer = (MigrateAnswer)_agentMgr.send(srcHost.getId(), migrateCommand); + + boolean success = migrateAnswer != null && migrateAnswer.getResult(); + + handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost); + + if (migrateAnswer == null) { + throw new CloudRuntimeException("Unable to get an answer to the migrate command"); + } + + if (!migrateAnswer.getResult()) { + errMsg = migrateAnswer.getDetails(); + + throw new CloudRuntimeException(errMsg); + } + } + catch (Exception ex) { + errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.copyAsync': " + ex.getMessage(); + + throw new CloudRuntimeException(errMsg); + } + finally { + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); + + callback.complete(result); + } + } + + private void handlePostMigration(boolean success, Map srcVolumeInfoToDestVolumeInfo, VirtualMachineTO vmTO, Host destHost) { + if (!success) { + try { + PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + + pfmc.setRollback(true); + + Answer pfma = _agentMgr.send(destHost.getId(), pfmc); + + if (pfma == null || !pfma.getResult()) { + String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; + String msg = "Unable to rollback prepare for migration due to the following: " + details; + + throw new AgentUnavailableException(msg, destHost.getId()); + } + } + catch (Exception e) { + LOGGER.debug("Failed to disconnect one or more (original) dest volumes", e); + } + } + + for (Map.Entry entry : srcVolumeInfoToDestVolumeInfo.entrySet()) { + VolumeInfo srcVolumeInfo = entry.getKey(); + VolumeInfo destVolumeInfo = entry.getValue(); + + if (success) { + srcVolumeInfo.processEvent(Event.OperationSuccessed); + destVolumeInfo.processEvent(Event.OperationSuccessed); + + _volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId()); + + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + + volumeVO.setFormat(ImageFormat.QCOW2); + + _volumeDao.update(volumeVO.getId(), volumeVO); + + try { + _volumeService.destroyVolume(srcVolumeInfo.getId()); + + srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId()); + + AsyncCallFuture destroyFuture = _volumeService.expungeVolumeAsync(srcVolumeInfo); + + if (destroyFuture.get().isFailed()) { + LOGGER.debug("Failed to clean up source volume on storage"); + } + } catch (Exception e) { + LOGGER.debug("Failed to clean up source volume on storage", e); + } + + // Update the volume ID for snapshots on secondary storage + if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) { + _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); + _snapshotDataStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); + } + } + else { + try { + disconnectHostFromVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.get_iScsiName()); + } + catch (Exception e) { + LOGGER.debug("Failed to disconnect (new) dest volume", e); + } + + try { + _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore()); + } + catch (Exception e) { + LOGGER.debug("Failed to revoke access from dest volume", e); + } + + destVolumeInfo.processEvent(Event.OperationFailed); + srcVolumeInfo.processEvent(Event.OperationFailed); + + try { + _volumeService.destroyVolume(destVolumeInfo.getId()); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId()); + + AsyncCallFuture destroyFuture = _volumeService.expungeVolumeAsync(destVolumeInfo); + + if (destroyFuture.get().isFailed()) { + LOGGER.debug("Failed to clean up dest volume on storage"); + } + } catch (Exception e) { + LOGGER.debug("Failed to clean up dest volume on storage", e); + } + } + } + } + + private VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePoolVO storagePoolVO) { + Long lastPoolId = volume.getPoolId(); + + VolumeVO newVol = new VolumeVO(volume); + + newVol.setInstanceId(null); + newVol.setChainInfo(null); + newVol.setPath(null); + newVol.setFolder(null); + newVol.setPodId(storagePoolVO.getPodId()); + newVol.setPoolId(storagePoolVO.getId()); + newVol.setLastPoolId(lastPoolId); + + return _volumeDao.persist(newVol); + } + + private String connectHostToVolume(Host host, long storagePoolId, String iqn) { + ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, true); + + return sendModifyTargetsCommand(modifyTargetsCommand, host.getId()).get(0); + } + + private void disconnectHostFromVolume(Host host, long storagePoolId, String iqn) { + ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, false); - volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + 
sendModifyTargetsCommand(modifyTargetsCommand, host.getId()); + } - volumeInfo.processEvent(Event.MigrationRequested); + private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String iqn, boolean add) { + StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); - volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + Map details = new HashMap<>(); - if (useCloning) { - copyCmdAnswer = performResignature(volumeInfo, hostVO); - } - else { - // asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning - // even when we don't need those hosts to do this kind of copy work - hostVO = getHost(snapshotInfo.getDataCenterId(), false); + details.put(ModifyTargetsCommand.IQN, iqn); + details.put(ModifyTargetsCommand.STORAGE_TYPE, storagePool.getPoolType().name()); + details.put(ModifyTargetsCommand.STORAGE_UUID, storagePool.getUuid()); + details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress()); + details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort())); - copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO); - } + ModifyTargetsCommand modifyTargetsCommand = new ModifyTargetsCommand(); - if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { - if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { - errMsg = copyCmdAnswer.getDetails(); - } - else { - errMsg = "Unable to create volume from snapshot"; - } - } - } - catch (Exception ex) { - errMsg = ex.getMessage() != null ? ex.getMessage() : "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem'"; - } + List> targets = new ArrayList<>(); - CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + targets.add(details); - result.setResult(errMsg); + modifyTargetsCommand.setTargets(targets); + modifyTargetsCommand.setApplyToAllHostsInCluster(true); + modifyTargetsCommand.setAdd(add); + modifyTargetsCommand.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - callback.complete(result); + return modifyTargetsCommand; } - /** - * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to - * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. - * - * The resultant volume must be writable because we need to resign the SR and the VDI that should be inside of it before we copy - * the VHD file to secondary storage. - * - * If the storage system is using writable snapshots, then nothing need be done by that storage system here because we can just - * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. 
- */ - private void createVolumeFromSnapshot(HostVO hostVO, SnapshotInfo snapshotInfo, boolean keepGrantedAccess) { - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "tempVolume", "create"); + private List sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + ModifyTargetsAnswer modifyTargetsAnswer = (ModifyTargetsAnswer)_agentMgr.easySend(hostId, cmd); - try { - snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); - } - finally { - _snapshotDetailsDao.remove(snapshotDetails.getId()); + if (modifyTargetsAnswer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); } - CopyCmdAnswer copyCmdAnswer = performResignature(snapshotInfo, hostVO, keepGrantedAccess); + if (!modifyTargetsAnswer.getResult()) { + String msg = "Unable to modify targets on the following host: " + hostId; - if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { - if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { - throw new CloudRuntimeException(copyCmdAnswer.getDetails()); - } else { - throw new CloudRuntimeException("Unable to create volume from snapshot"); - } + throw new CloudRuntimeException(msg); } + + return modifyTargetsAnswer.getConnectedPaths(); } - /** - * If the underlying storage system needed to create a volume from a snapshot for createVolumeFromSnapshot(HostVO, SnapshotInfo), then - * this is its opportunity to delete that temporary volume and restore properties in snapshot_details to the way they were before the - * invocation of createVolumeFromSnapshot(HostVO, SnapshotInfo). - */ - private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "tempVolume", "delete"); + /* + * At a high level: The source storage cannot be managed and the destination storage must be managed. 
+ */ + private void verifyLiveMigrationMapForKVM(Map volumeDataStoreMap) { + for (Map.Entry entry : volumeDataStoreMap.entrySet()) { + VolumeInfo volumeInfo = entry.getKey(); - try { - snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); - } - finally { - _snapshotDetailsDao.remove(snapshotDetails.getId()); - } - } + Long storagePoolId = volumeInfo.getPoolId(); + StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(storagePoolId); - private SnapshotDetailsVO handleSnapshotDetails(long csSnapshotId, String name, String value) { - _snapshotDetailsDao.removeDetail(csSnapshotId, name); + if (srcStoragePoolVO == null) { + throw new CloudRuntimeException("Volume with ID " + volumeInfo.getId() + " is not associated with a storage pool."); + } - SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(csSnapshotId, name, value, false); + if (srcStoragePoolVO.isManaged()) { + throw new CloudRuntimeException("Migrating a volume online with KVM from managed storage is not currently supported."); + } - return _snapshotDetailsDao.persist(snapshotDetails); + DataStore dataStore = entry.getValue(); + StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(dataStore.getId()); + + if (destStoragePoolVO == null) { + throw new CloudRuntimeException("Destination storage pool with ID " + dataStore.getId() + " was not located."); + } + + if (!destStoragePoolVO.isManaged()) { + throw new CloudRuntimeException("Migrating a volume online with KVM can currently only be done when moving to managed storage."); + } + } } private boolean canStorageSystemCreateVolumeFromVolume(SnapshotInfo snapshotInfo) { @@ -791,7 +1730,17 @@ private boolean canStorageSystemCreateVolumeFromVolume(SnapshotInfo snapshotInfo return supportsCloningVolumeFromVolume; } - private String getProperty(long snapshotId, String property) { + private String getVolumeProperty(long volumeId, String property) { + VolumeDetailVO volumeDetails = volumeDetailsDao.findDetail(volumeId, property); + + if (volumeDetails != null) { + return volumeDetails.getValue(); + } + + return null; + } + + private String getSnapshotProperty(long snapshotId, String property) { SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, property); if (snapshotDetails != null) { @@ -801,18 +1750,124 @@ private String getProperty(long snapshotId, String property) { return null; } - private Map getVolumeDetails(VolumeInfo volumeInfo) { - Map volumeDetails = new HashMap(); + private void handleCreateTemplateFromVolume(VolumeInfo volumeInfo, TemplateInfo templateInfo, AsyncCompletionCallback callback) { + boolean srcVolumeDetached = volumeInfo.getAttachedVM() == null; - VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); + String errMsg = null; + CopyCmdAnswer copyCmdAnswer = null; + + try { + if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat())) { + throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume on KVM currently."); + } + + volumeInfo.processEvent(Event.MigrationRequested); + + HostVO hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.KVM, false); + DataStore srcDataStore = volumeInfo.getDataStore(); + + String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); + int primaryStorageDownloadWait = NumberUtils.toInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); + CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), 
primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + + try { + if (srcVolumeDetached) { + _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); + } + + Map srcDetails = getVolumeDetails(volumeInfo); + + copyCommand.setOptions(srcDetails); + + copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); + + if (!copyCmdAnswer.getResult()) { + // We were not able to copy. Handle it. + errMsg = copyCmdAnswer.getDetails(); + throw new CloudRuntimeException(errMsg); + } + + VMTemplateVO vmTemplateVO = _vmTemplateDao.findById(templateInfo.getId()); + + vmTemplateVO.setHypervisorType(HypervisorType.KVM); + + _vmTemplateDao.update(vmTemplateVO.getId(), vmTemplateVO); + } + catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { + String msg = "Failed to create template from volume (Volume ID = " + volumeInfo.getId() + ") : "; + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + finally { + try { + if (srcVolumeDetached) { + _volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); + } + } + catch (Exception ex) { + LOGGER.warn("Error revoking access to volume (Volume ID = " + volumeInfo.getId() + "): " + ex.getMessage(), ex); + } + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = copyCmdAnswer.getDetails(); + } + else { + errMsg = "Unable to create template from volume"; + } + } + + try { + if (StringUtils.isEmpty(errMsg)) { + volumeInfo.processEvent(Event.OperationSuccessed); + } + else { + volumeInfo.processEvent(Event.OperationFailed); + } + } + catch (Exception ex) { + LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex); + } + } + } + catch (Exception ex) { + errMsg = ex.getMessage(); + + throw new CloudRuntimeException(errMsg); + } + finally { + if (copyCmdAnswer == null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); - long storagePoolId = volumeVO.getPoolId(); + result.setResult(errMsg); + + callback.complete(result); + } + } + + private Map getVolumeDetails(VolumeInfo volumeInfo) { + long storagePoolId = volumeInfo.getPoolId(); StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); + if (!storagePoolVO.isManaged()) { + return null; + } + + Map volumeDetails = new HashMap<>(); + + VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); + volumeDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); volumeDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); volumeDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); + volumeDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeVO.getSize())); + volumeDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getVolumeProperty(volumeInfo.getId(), DiskTO.SCSI_NAA_DEVICE_ID)); + ChapInfo chapInfo = _volumeService.getChapInfo(volumeInfo, volumeInfo.getDataStore()); if (chapInfo != null) { @@ -836,34 +1891,59 @@ private String getProperty(long snapshotId, String property) { long snapshotId = snapshotInfo.getId(); - snapshotDetails.put(DiskTO.IQN, getProperty(snapshotId, DiskTO.IQN)); + snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); + snapshotDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(snapshotInfo.getSize())); + snapshotDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getSnapshotProperty(snapshotId, DiskTO.SCSI_NAA_DEVICE_ID)); - 
snapshotDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME)); - snapshotDetails.put(DiskTO.CHAP_INITIATOR_SECRET, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET)); - snapshotDetails.put(DiskTO.CHAP_TARGET_USERNAME, getProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME)); - snapshotDetails.put(DiskTO.CHAP_TARGET_SECRET, getProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET)); + snapshotDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, getSnapshotProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME)); + snapshotDetails.put(DiskTO.CHAP_INITIATOR_SECRET, getSnapshotProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET)); + snapshotDetails.put(DiskTO.CHAP_TARGET_USERNAME, getSnapshotProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME)); + snapshotDetails.put(DiskTO.CHAP_TARGET_SECRET, getSnapshotProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET)); return snapshotDetails; } private HostVO getHost(SnapshotInfo snapshotInfo) { - HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), true); + HypervisorType hypervisorType = snapshotInfo.getHypervisorType(); - if (hostVO == null) { - hostVO = getHost(snapshotInfo.getDataCenterId(), false); + if (HypervisorType.XenServer.equals(hypervisorType)) { + HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, true); if (hostVO == null) { - throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId()); + hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, false); + + if (hostVO == null) { + throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId()); + } } + + return hostVO; + } + + if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) { + return getHost(snapshotInfo.getDataCenterId(), hypervisorType, false); + } + + throw new CloudRuntimeException("Unsupported hypervisor type"); + } + + private HostVO getHostInCluster(long clusterId) { + List hosts = _hostDao.findByClusterId(clusterId); + + if (hosts != null && hosts.size() > 0) { + Collections.shuffle(hosts, RANDOM); + + return hosts.get(0); } - return hostVO; + throw new CloudRuntimeException("Unable to locate a host"); } - private HostVO getHost(Long zoneId, boolean computeClusterMustSupportResign) { + private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null."); + Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null."); - List hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, HypervisorType.XenServer); + List hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); if (hosts == null) { return null; @@ -896,15 +1976,6 @@ private HostVO getHost(Long zoneId, boolean computeClusterMustSupportResign) { return null; } - @Override - public void copyAsync(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback callback) { - CopyCommandResult result = new CopyCommandResult(null, null); - - result.setResult("Unsupported operation requested for copying data."); - - callback.complete(result); - } - private Map getDetails(DataObject dataObj) { if (dataObj instanceof VolumeInfo) { return getVolumeDetails((VolumeInfo)dataObj); @@ -916,19 +1987,45 @@ else if (dataObj instanceof SnapshotInfo) { throw new CloudRuntimeException("'dataObj' must be of type 'VolumeInfo' or 
'SnapshotInfo'."); } - private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO) { - return performResignature(dataObj, hostVO, false); + private boolean isForVMware(DataObject dataObj) { + if (dataObj instanceof VolumeInfo) { + return ImageFormat.OVA.equals(((VolumeInfo)dataObj).getFormat()); + } + + if (dataObj instanceof SnapshotInfo) { + return ImageFormat.OVA.equals(((SnapshotInfo)dataObj).getBaseVolume().getFormat()); + } + + return dataObj instanceof TemplateInfo && HypervisorType.VMware.equals(((TemplateInfo)dataObj).getHypervisorType()); + } + + private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, Map extraDetails) { + return performResignature(dataObj, hostVO, extraDetails, false); } - private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, boolean keepGrantedAccess) { + private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, Map extraDetails, boolean keepGrantedAccess) { long storagePoolId = dataObj.getDataStore().getId(); DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); Map details = getDetails(dataObj); + if (extraDetails != null) { + details.putAll(extraDetails); + } + ResignatureCommand command = new ResignatureCommand(details); - ResignatureAnswer answer = null; + ResignatureAnswer answer; + + GlobalLock lock = GlobalLock.getInternLock(dataStore.getUuid()); + + if (!lock.lock(LOCK_TIME_IN_SECONDS)) { + String errMsg = "Couldn't lock the DB (in performResignature) on the following string: " + dataStore.getUuid(); + + LOGGER.warn(errMsg); + + throw new CloudRuntimeException(errMsg); + } try { _volumeService.grantAccess(dataObj, hostVO, dataStore); @@ -945,7 +2042,10 @@ private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, bool throw new CloudRuntimeException(msg + ex.getMessage()); } finally { - if (keepGrantedAccess == false) { + lock.unlock(); + lock.releaseRef(); + + if (!keepGrantedAccess) { _volumeService.revokeAccess(dataObj, hostVO, dataStore); } } @@ -972,19 +2072,148 @@ private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, bool return new CopyCmdAnswer(newVolume); } - protected DataObject cacheSnapshotChain(SnapshotInfo snapshot, Scope scope) { + private DataObject cacheSnapshotChain(SnapshotInfo snapshot, Scope scope) { DataObject leafData = null; DataStore store = cacheMgr.getCacheStorage(snapshot, scope); + while (snapshot != null) { DataObject cacheData = cacheMgr.createCacheObject(snapshot, store); + if (leafData == null) { leafData = cacheData; } + snapshot = snapshot.getParent(); } + return leafData; } + private String migrateVolume(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) { + boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null; + + try { + Map srcDetails = getVolumeDetails(srcVolumeInfo); + Map destDetails = getVolumeDetails(destVolumeInfo); + + MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), + srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); + + if (srcVolumeDetached) { + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + } + + _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + + MigrateVolumeAnswer migrateVolumeAnswer = (MigrateVolumeAnswer)_agentMgr.send(hostVO.getId(), migrateVolumeCommand); + + if (migrateVolumeAnswer == null || !migrateVolumeAnswer.getResult()) { + if 
(migrateVolumeAnswer != null && !StringUtils.isEmpty(migrateVolumeAnswer.getDetails())) { + throw new CloudRuntimeException(migrateVolumeAnswer.getDetails()); + } + else { + throw new CloudRuntimeException(errMsg); + } + } + + if (srcVolumeDetached) { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } + + try { + _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + } + catch (Exception e) { + // This volume should be deleted soon, so just log a warning here. + LOGGER.warn(e.getMessage(), e); + } + + return migrateVolumeAnswer.getVolumePath(); + } + catch (Exception ex) { + try { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } + catch (Exception e) { + // This volume should be deleted soon, so just log a warning here. + LOGGER.warn(e.getMessage(), e); + } + + if (srcVolumeDetached) { + _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + } + + String msg = "Failed to perform volume migration : "; + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + } + + private String copyVolumeToSecondaryStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) { + boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null; + + try { + StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); + Map srcDetails = getVolumeDetails(srcVolumeInfo); + + CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, + destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); + + copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); + copyVolumeCommand.setSrcDetails(srcDetails); + + if (srcVolumeDetached) { + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + } + + CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)_agentMgr.send(hostVO.getId(), copyVolumeCommand); + + if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) { + if (copyVolumeAnswer != null && !StringUtils.isEmpty(copyVolumeAnswer.getDetails())) { + throw new CloudRuntimeException(copyVolumeAnswer.getDetails()); + } + else { + throw new CloudRuntimeException(errMsg); + } + } + + return copyVolumeAnswer.getVolumePath(); + } + catch (Exception ex) { + String msg = "Failed to perform volume copy to secondary storage : "; + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage()); + } + finally { + if (srcVolumeDetached) { + _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + } + } + } + + private void setCertainVolumeValuesNull(long volumeId) { + VolumeVO volumeVO = _volumeDao.findById(volumeId); + + volumeVO.set_iScsiName(null); + volumeVO.setMinIops(null); + volumeVO.setMaxIops(null); + volumeVO.setHypervisorSnapshotReserve(null); + + _volumeDao.update(volumeId, volumeVO); + } + + private void updateVolumePath(long volumeId, String path) { + VolumeVO volumeVO = _volumeDao.findById(volumeId); + + volumeVO.setPath(path); + + _volumeDao.update(volumeId, volumeVO); + } + /** * Copies data from secondary storage to a primary volume * @param volumeInfo The primary volume @@ -1033,7 +2262,7 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps LOGGER.warn(msg, ex); - throw new CloudRuntimeException(msg + ex.getMessage()); + throw new CloudRuntimeException(msg + 
ex.getMessage(), ex); } finally { if (Snapshot.LocationType.PRIMARY.equals(locationType)) { diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index 5a4eee4434b..88c385b4e61 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -17,12 +17,15 @@ package org.apache.cloudstack.storage.snapshot; import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.to.DiskTO; import com.cloud.dc.dao.ClusterDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -46,10 +49,16 @@ import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotService; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.google.common.base.Optional; import com.google.common.base.Preconditions; + import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; @@ -64,17 +73,21 @@ import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.inject.Inject; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; +import java.util.UUID; @Component public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { @@ -89,14 +102,18 @@ @Inject private SnapshotDao snapshotDao; @Inject private SnapshotDataFactory snapshotDataFactory; @Inject private SnapshotDetailsDao snapshotDetailsDao; + @Inject SnapshotDataStoreDao snapshotStoreDao; + @Inject private VolumeDetailsDao volumeDetailsDao; @Inject private VMInstanceDao vmInstanceDao; + @Inject private VMSnapshotDao vmSnapshotDao; + @Inject private VMSnapshotService vmSnapshotService; @Inject private VolumeDao volumeDao; @Inject private VolumeService volService; @Inject private VolumeDetailsDao _volumeDetailsDaoImpl; @Override public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { - Preconditions.checkArgument(snapshotInfo != null, "backupSnapshot expects a valid snapshot"); + Preconditions.checkArgument(snapshotInfo != null, "'snapshotInfo' cannot be 
'null'."); if (snapshotInfo.getLocationType() != Snapshot.LocationType.SECONDARY) { markAsBackedUp((SnapshotObject)snapshotInfo); @@ -107,14 +124,24 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { // At this point, the snapshot is either taken as a native // snapshot on the storage or exists as a volume on the storage (clone). // If archive flag is passed in, we should copy this snapshot to secondary - // storage and delete it from the primary storage. + // storage and delete it from primary storage. HostVO host = getHost(snapshotInfo.getVolumeId()); + boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(snapshotInfo.getBaseVolume().getPoolId()); + + if (!canStorageSystemCreateVolumeFromSnapshot) { + String msg = "Cannot archive snapshot: 'canStorageSystemCreateVolumeFromSnapshot' was false."; + + s_logger.warn(msg); + + throw new CloudRuntimeException(msg); + } + boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(host.getClusterId()); - if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) { - String msg = "Cannot archive snapshot: canStorageSystemCreateVolumeFromSnapshot and/or computeClusterSupportsResign were false."; + if (!computeClusterSupportsResign) { + String msg = "Cannot archive snapshot: 'computeClusterSupportsResign' was false."; s_logger.warn(msg); @@ -126,6 +153,8 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { @Override public boolean deleteSnapshot(Long snapshotId) { + Preconditions.checkArgument(snapshotId != null, "'snapshotId' cannot be 'null'."); + SnapshotVO snapshotVO = snapshotDao.findById(snapshotId); if (Snapshot.State.Destroyed.equals(snapshotVO.getState())) { @@ -139,23 +168,21 @@ public boolean deleteSnapshot(Long snapshotId) { } if (!Snapshot.State.BackedUp.equals(snapshotVO.getState())) { - throw new InvalidParameterValueException("Unable to delete snapshotshot " + snapshotId + " because it is in the following state: " + snapshotVO.getState()); + throw new InvalidParameterValueException("Unable to delete snapshot '" + snapshotId + + "' because it is in the following state: " + snapshotVO.getState()); } return cleanupSnapshotOnPrimaryStore(snapshotId); } /** - * Cleans up a snapshot which was taken on a primary store. This function - * removes + * This cleans up a snapshot which was taken on a primary store. 
* - * @param snapshotId: ID of snapshot that needs to be removed - * @return true if snapshot is removed, false otherwise + * @param snapshotId: ID of snapshot to be removed + * @return true if snapshot is removed; else, false */ - @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_OFF_PRIMARY, eventDescription = "deleting snapshot", async = true) private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { - SnapshotObject snapshotObj = (SnapshotObject)snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary); if (snapshotObj == null) { @@ -167,13 +194,13 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { } if (ObjectInDataStoreStateMachine.State.Copying.equals(snapshotObj.getStatus())) { - throw new InvalidParameterValueException("Unable to delete snapshotshot " + snapshotId + " because it is in the copying state."); + throw new InvalidParameterValueException("Unable to delete snapshot '" + snapshotId + "' because it is in the copying state"); } try { snapshotObj.processEvent(Snapshot.Event.DestroyRequested); - List volumesFromSnapshot; - volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotId), null); + + List volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotId), null); if (volumesFromSnapshot.size() > 0) { try { @@ -181,6 +208,7 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { } catch (NoTransitionException e1) { s_logger.debug("Failed to change snapshot state: " + e1.toString()); } + throw new InvalidParameterValueException("Unable to perform delete operation, Snapshot with id: " + snapshotId + " is in use "); } } @@ -194,6 +222,7 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { snapshotSvr.deleteSnapshot(snapshotObj); snapshotObj.processEvent(Snapshot.Event.OperationSucceeded); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_OFF_PRIMARY, snapshotObj.getAccountId(), snapshotObj.getDataCenterId(), snapshotId, snapshotObj.getName(), null, null, 0L, snapshotObj.getClass().getName(), snapshotObj.getUuid()); } @@ -209,12 +238,202 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { return false; } + return true; } + private boolean isAcceptableRevertFormat(VolumeVO volumeVO) { + return ImageFormat.VHD.equals(volumeVO.getFormat()) || ImageFormat.OVA.equals(volumeVO.getFormat()) || ImageFormat.QCOW2.equals(volumeVO.getFormat()); + } + + private void verifyFormat(VolumeInfo volumeInfo) { + ImageFormat imageFormat = volumeInfo.getFormat(); + + if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2) { + throw new CloudRuntimeException("Only the following image types are currently supported: " + + ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", and " + ImageFormat.QCOW2); + } + } + + private void verifyDiskTypeAndHypervisor(VolumeInfo volumeInfo) { + ImageFormat imageFormat = volumeInfo.getFormat(); + Volume.Type volumeType = volumeInfo.getVolumeType(); + + if (ImageFormat.OVA.equals(imageFormat) && Volume.Type.ROOT.equals(volumeType)) { + throw new CloudRuntimeException("The hypervisor type is VMware and the disk type is ROOT. 
For this situation, " + + "recover the data on the snapshot by creating a new CloudStack volume from the corresponding volume snapshot."); + } + } + + private void verifySnapshotType(SnapshotInfo snapshotInfo) { + if (snapshotInfo.getHypervisorType() == HypervisorType.KVM && snapshotInfo.getDataStore().getRole() != DataStoreRole.Primary) { + throw new CloudRuntimeException("For the KVM hypervisor type, you can only revert a volume to a snapshot state if the snapshot " + + "resides on primary storage. For other snapshot types, create a volume from the snapshot to recover its data."); + } + } + + private void verifyLocationType(SnapshotInfo snapshotInfo) { + VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); + + if (snapshotInfo.getLocationType() == Snapshot.LocationType.SECONDARY && volumeInfo.getFormat() != ImageFormat.VHD) { + throw new CloudRuntimeException("Only the '" + ImageFormat.VHD + "' image type can be used when 'LocationType' is set to 'SECONDARY'."); + } + } + + private boolean getHypervisorRequiresResignature(VolumeInfo volumeInfo) { + return ImageFormat.VHD.equals(volumeInfo.getFormat()) || ImageFormat.OVA.equals(volumeInfo.getFormat()); + } + @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { - throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead."); + public boolean revertSnapshot(SnapshotInfo snapshotInfo) { + VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); + + verifyFormat(volumeInfo); + + verifyDiskTypeAndHypervisor(volumeInfo); + + verifySnapshotType(snapshotInfo); + + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshotInfo.getId(), DataStoreRole.Primary); + + if (snapshotStore != null) { + long snapshotStoragePoolId = snapshotStore.getDataStoreId(); + + if (!volumeInfo.getPoolId().equals(snapshotStoragePoolId)) { + String errMsg = "Storage pool mismatch"; + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + } + + boolean storageSystemSupportsCapability = storageSystemSupportsCapability(volumeInfo.getPoolId(), + DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString()); + + if (!storageSystemSupportsCapability) { + String errMsg = "Storage pool revert capability not supported"; + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshotInfo.getId()); + + if (snapshotVO == null) { + String errMsg = "Failed to acquire lock on the following snapshot: " + snapshotInfo.getId(); + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + Long hostId = null; + boolean success = false; + + try { + volumeInfo.stateTransit(Volume.Event.RevertSnapshotRequested); + + if (getHypervisorRequiresResignature(volumeInfo)) { + hostId = getHostId(volumeInfo); + + if (hostId != null) { + HostVO hostVO = hostDao.findById(hostId); + DataStore dataStore = dataStoreMgr.getDataStore(volumeInfo.getPoolId(), DataStoreRole.Primary); + + volService.revokeAccess(volumeInfo, hostVO, dataStore); + + modifyTarget(false, volumeInfo, hostId); + } + } + + success = snapshotSvr.revertSnapshot(snapshotInfo); + + if (!success) { + String errMsg = "Failed to revert a volume to a snapshot state"; + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + } + finally { + if (getHypervisorRequiresResignature(volumeInfo)) { + if (hostId != null) { + HostVO hostVO = hostDao.findById(hostId); + DataStore dataStore = 
dataStoreMgr.getDataStore(volumeInfo.getPoolId(), DataStoreRole.Primary); + + volService.grantAccess(volumeInfo, hostVO, dataStore); + + modifyTarget(true, volumeInfo, hostId); + } + } + + if (success) { + volumeInfo.stateTransit(Volume.Event.OperationSucceeded); + } + else { + volumeInfo.stateTransit(Volume.Event.OperationFailed); + } + + snapshotDao.releaseFromLockTable(snapshotInfo.getId()); + } + + return true; + } + + private Long getHostId(VolumeInfo volumeInfo) { + VirtualMachine virtualMachine = volumeInfo.getAttachedVM(); + + if (virtualMachine == null) { + return null; + } + + Long hostId = virtualMachine.getHostId(); + + if (hostId == null) { + hostId = virtualMachine.getLastHostId(); + } + + return hostId; + } + + private void modifyTarget(boolean add, VolumeInfo volumeInfo, long hostId) { + StoragePoolVO storagePoolVO = storagePoolDao.findById(volumeInfo.getPoolId()); + + Map details = new HashMap<>(3); + + details.put(ModifyTargetsCommand.IQN, volumeInfo.get_iScsiName()); + details.put(ModifyTargetsCommand.STORAGE_HOST, storagePoolVO.getHostAddress()); + details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); + + List> targets = new ArrayList<>(1); + + targets.add(details); + + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + cmd.setTargets(targets); + cmd.setApplyToAllHostsInCluster(true); + cmd.setAdd(add); + cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.BOTH); + + sendModifyTargetsCommand(cmd, hostId); + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); + } + + if (!answer.getResult()) { + String msg = "Unable to modify targets on the following host: " + hostId; + + throw new CloudRuntimeException(msg); + } } @Override @@ -222,8 +441,23 @@ public boolean revertSnapshot(SnapshotInfo snapshot) { public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) { VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); - if (volumeInfo.getFormat() != ImageFormat.VHD) { - throw new CloudRuntimeException("Only the " + ImageFormat.VHD.toString() + " image type is currently supported."); + verifyFormat(volumeInfo); + verifyLocationType(snapshotInfo); + + final boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(volumeInfo.getPoolId()); + final boolean computeClusterSupportsVolumeClone; + + // only XenServer, VMware and KVM are currently supported + if (volumeInfo.getFormat() == ImageFormat.VHD) { + HostVO hostVO = getHost(volumeInfo.getId()); + + computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId()); + } + else if (volumeInfo.getFormat() == ImageFormat.OVA || volumeInfo.getFormat() == ImageFormat.QCOW2) { + computeClusterSupportsVolumeClone = true; + } + else { + throw new CloudRuntimeException("Unsupported format"); } SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshotInfo.getId()); @@ -232,22 +466,33 @@ public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) { throw new CloudRuntimeException("Failed to acquire lock on the following snapshot: " + snapshotInfo.getId()); } + VMSnapshot vmSnapshot = null; + + if (ImageFormat.OVA.equals(volumeInfo.getFormat())) { + setVmdk(snapshotInfo, volumeInfo); + + try { + vmSnapshot = takeHypervisorSnapshot(volumeInfo); + } + catch (ResourceAllocationException ex) { + String errMsg = "Unable to 
allocate VM snapshot"; + + s_logger.error(errMsg, ex); + + throw new CloudRuntimeException(errMsg, ex); + } + } + SnapshotResult result = null; SnapshotInfo snapshotOnPrimary = null; try { volumeInfo.stateTransit(Volume.Event.SnapshotRequested); - // only XenServer is currently supported - HostVO hostVO = getHost(volumeInfo.getId()); - - boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(volumeInfo.getPoolId()); - boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); - - // if canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign, then take a back-end snapshot or create a back-end clone; + // if canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsVolumeClone, then take a back-end snapshot or create a back-end clone; // else, just create a new back-end volume (eventually used to create a new SR on and to copy a VDI to) - if (canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign) { + if (canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsVolumeClone) { SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshotInfo.getId(), "takeSnapshot", Boolean.TRUE.toString(), @@ -264,7 +509,7 @@ public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) { throw new CloudRuntimeException(result.getResult()); } - if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) { + if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsVolumeClone) { performSnapshotAndCopyOnHostSide(volumeInfo, snapshotInfo); } @@ -276,6 +521,12 @@ public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) { } else { volumeInfo.stateTransit(Volume.Event.OperationFailed); } + + if (ImageFormat.OVA.equals(volumeInfo.getFormat())) { + if (vmSnapshot != null) { + deleteHypervisorSnapshot(vmSnapshot); + } + } } snapshotDao.releaseFromLockTable(snapshotInfo.getId()); @@ -298,6 +549,68 @@ public void postSnapshotCreation(SnapshotInfo snapshot) { } + private VMSnapshot takeHypervisorSnapshot(VolumeInfo volumeInfo) throws ResourceAllocationException { + VirtualMachine virtualMachine = volumeInfo.getAttachedVM(); + + if (virtualMachine != null && VirtualMachine.State.Running.equals(virtualMachine.getState())) { + String vmSnapshotName = UUID.randomUUID().toString().replace("-", ""); + + VMSnapshotVO vmSnapshotVO = + new VMSnapshotVO(virtualMachine.getAccountId(), virtualMachine.getDomainId(), virtualMachine.getId(), vmSnapshotName, vmSnapshotName, + vmSnapshotName, virtualMachine.getServiceOfferingId(), VMSnapshot.Type.Disk, null); + + VMSnapshot vmSnapshot = vmSnapshotDao.persist(vmSnapshotVO); + + if (vmSnapshot == null) { + throw new CloudRuntimeException("Unable to allocate a VM snapshot object"); + } + + vmSnapshot = vmSnapshotService.createVMSnapshot(virtualMachine.getId(), vmSnapshot.getId(), true); + + if (vmSnapshot == null) { + throw new CloudRuntimeException("Unable to create a hypervisor-side snapshot"); + } + + try { + Thread.sleep(60000); + } + catch (Exception ex) { + s_logger.warn(ex.getMessage(), ex); + } + + return vmSnapshot; + } + + // We didn't need to take a hypervisor-side snapshot. Return 'null' to indicate this. 
+ return null; + } + + private void deleteHypervisorSnapshot(VMSnapshot vmSnapshot) { + boolean success = vmSnapshotService.deleteVMSnapshot(vmSnapshot.getId()); + + if (!success) { + throw new CloudRuntimeException("Unable to delete the hypervisor-side snapshot"); + } + } + + private void setVmdk(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo) { + if (!ImageFormat.OVA.equals(volumeInfo.getFormat())) { + return; + } + + String search = "]"; + + String path = volumeInfo.getPath(); + int startIndex = path.indexOf(search); + + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshotInfo.getId(), + DiskTO.VMDK, + path.substring(startIndex + search.length()).trim(), + false); + + snapshotDetailsDao.persist(snapshotDetail); + } + private void updateLocationTypeInDb(SnapshotInfo snapshotInfo) { Object objPayload = snapshotInfo.getPayload(); @@ -313,19 +626,23 @@ private void updateLocationTypeInDb(SnapshotInfo snapshotInfo) { } private boolean canStorageSystemCreateVolumeFromSnapshot(long storagePoolId) { - boolean supportsCloningVolumeFromSnapshot = false; + return storageSystemSupportsCapability(storagePoolId, DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString()); + } + + private boolean storageSystemSupportsCapability(long storagePoolId, String capability) { + boolean supportsCapability = false; DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); Map mapCapabilities = dataStore.getDriver().getCapabilities(); if (mapCapabilities != null) { - String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString()); + String value = mapCapabilities.get(capability); - supportsCloningVolumeFromSnapshot = Boolean.valueOf(value); + supportsCapability = Boolean.valueOf(value); } - return supportsCloningVolumeFromSnapshot; + return supportsCapability; } private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo) { @@ -385,7 +702,7 @@ private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInf SnapshotAndCopyCommand snapshotAndCopyCommand = new SnapshotAndCopyCommand(volumeInfo.getPath(), sourceDetails, destDetails); - SnapshotAndCopyAnswer snapshotAndCopyAnswer = null; + SnapshotAndCopyAnswer snapshotAndCopyAnswer; try { // if sourceDetails != null, we need to connect the host(s) to the volume @@ -589,41 +906,104 @@ private void markAsBackedUp(SnapshotObject snapshotObj) { } } + private boolean usingBackendSnapshotFor(long snapshotId) { + String property = getProperty(snapshotId, "takeSnapshot"); + + return Boolean.parseBoolean(property); + } + @Override public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) { - if (SnapshotOperation.REVERT.equals(op)) { - return StrategyPriority.CANT_HANDLE; + Snapshot.LocationType locationType = snapshot.getLocationType(); + + // If the snapshot exists on Secondary Storage, we can't delete it. + if (SnapshotOperation.DELETE.equals(op)) { + if (Snapshot.LocationType.SECONDARY.equals(locationType)) { + return StrategyPriority.CANT_HANDLE; + } + + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image); + + // If the snapshot exists on Secondary Storage, we can't delete it. 
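
The reworked canHandle() below leans entirely on storageSystemSupportsCapability(), which reads the driver's capability map and parses the value as a boolean. A sketch of that probe; note that Boolean.valueOf(null) is Boolean.FALSE, so a driver that never advertises the key safely reads as "unsupported":

    import java.util.Map;

    final class CapabilityProbe {
        // Equivalent shape to storageSystemSupportsCapability(): a missing map
        // or a missing key both fall through to 'false' rather than throwing.
        static boolean supports(Map<String, String> driverCapabilities, String capability) {
            if (driverCapabilities == null) {
                return false;
            }
            return Boolean.valueOf(driverCapabilities.get(capability));
        }
    }
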
+ if (snapshotStore != null) { + return StrategyPriority.CANT_HANDLE; + } + + snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary); + + if (snapshotStore == null) { + return StrategyPriority.CANT_HANDLE; + } + + long snapshotStoragePoolId = snapshotStore.getDataStoreId(); + + boolean storageSystemSupportsCapability = storageSystemSupportsCapability(snapshotStoragePoolId, DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString()); + + return storageSystemSupportsCapability ? StrategyPriority.HIGHEST : StrategyPriority.CANT_HANDLE; } long volumeId = snapshot.getVolumeId(); VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); - long storagePoolId = volumeVO.getPoolId(); + long volumeStoragePoolId = volumeVO.getPoolId(); - DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + if (SnapshotOperation.REVERT.equals(op)) { + boolean baseVolumeExists = volumeVO.getRemoved() == null; - Snapshot.LocationType locationType = snapshot.getLocationType(); + if (baseVolumeExists) { + boolean acceptableFormat = isAcceptableRevertFormat(volumeVO); - // If the snapshot exists on Secondary Storage, we can't delete it. - if (SnapshotOperation.DELETE.equals(op) && Snapshot.LocationType.SECONDARY.equals(locationType)) { - return StrategyPriority.CANT_HANDLE; - } + if (acceptableFormat) { + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary); - if (dataStore != null) { - Map mapCapabilities = dataStore.getDriver().getCapabilities(); + boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshot.getId()); - if (mapCapabilities != null) { - String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString()); - Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value); + if (usingBackendSnapshot) { + if (snapshotStore != null) { + long snapshotStoragePoolId = snapshotStore.getDataStoreId(); - if (supportsStorageSystemSnapshots) { - return StrategyPriority.HIGHEST; + boolean storageSystemSupportsCapability = storageSystemSupportsCapability(snapshotStoragePoolId, + DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString()); + + if (storageSystemSupportsCapability) { + return StrategyPriority.HIGHEST; + } + + storageSystemSupportsCapability = storageSystemSupportsCapability(volumeStoragePoolId, + DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString()); + + if (storageSystemSupportsCapability) { + return StrategyPriority.HIGHEST; + } + } + } + else { + if (snapshotStore != null) { + long snapshotStoragePoolId = snapshotStore.getDataStoreId(); + + StoragePoolVO storagePoolVO = storagePoolDao.findById(snapshotStoragePoolId); + + if (storagePoolVO.isManaged()) { + return StrategyPriority.HIGHEST; + } + } + + StoragePoolVO storagePoolVO = storagePoolDao.findById(volumeStoragePoolId); + + if (storagePoolVO.isManaged()) { + return StrategyPriority.HIGHEST; + } + } } } + + return StrategyPriority.CANT_HANDLE; } - return StrategyPriority.CANT_HANDLE; + boolean storageSystemSupportsCapability = storageSystemSupportsCapability(volumeStoragePoolId, DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString()); + + return storageSystemSupportsCapability ? 
StrategyPriority.HIGHEST : StrategyPriority.CANT_HANDLE; } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index ad0418f5c9d..36313058e92 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -71,7 +71,9 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.storage.ListVolumeAnswer; import com.cloud.agent.api.storage.ListVolumeCommand; import com.cloud.agent.api.storage.ResizeVolumeCommand; @@ -124,6 +126,8 @@ public class VolumeServiceImpl implements VolumeService { private static final Logger s_logger = Logger.getLogger(VolumeServiceImpl.class); @Inject + protected AgentManager agentMgr; + @Inject VolumeDao volDao; @Inject PrimaryDataStoreProviderManager dataStoreMgr; @@ -895,12 +899,12 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T copyCaller.setCallback(copyCaller.getTarget().copyManagedTemplateCallback(null, null)).setContext(copyContext); // Populate details which will be later read by the storage subsystem. - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); - details.put(PrimaryDataStore.MANAGED_STORE_TARGET, ((TemplateObject)templateOnPrimary).getInstallPath()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, templateOnPrimary.getInstallPath()); details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName()); details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString()); details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize())); @@ -920,7 +924,7 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); - VolumeApiResult result = null; + VolumeApiResult result; try { motionSrv.copyAsync(srcTemplateInfo, templateOnPrimary, destHost, copyCaller); @@ -929,6 +933,16 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T } finally { revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore); + + if (HypervisorType.VMware.equals(destHost.getHypervisorType())) { + details.put(ModifyTargetsCommand.IQN, templateOnPrimary.getInstallPath()); + + List> targets = new ArrayList<>(); + + targets.add(details); + + removeDynamicTargets(destHost.getId(), targets); + } } if (result.isFailed()) { @@ -951,6 +965,32 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T } } + private void removeDynamicTargets(long hostId, List> targets) { + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + cmd.setTargets(targets); + cmd.setApplyToAllHostsInCluster(true); + cmd.setAdd(false); + cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); + + sendModifyTargetsCommand(cmd, hostId); + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = agentMgr.easySend(hostId, cmd); + + if 
(answer == null) { + String msg = "Unable to get an answer to the modify targets command"; + + s_logger.warn(msg); + } + else if (!answer.getResult()) { + String msg = "Unable to modify target on the following host: " + hostId; + + s_logger.warn(msg); + } + } + /** * Clones the template volume on managed storage to the ROOT volume * @@ -1085,12 +1125,12 @@ private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, Primary destPrimaryDataStore.getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()) ); - boolean computeZoneSupportsResign = computeZoneSupportsResign(destHost.getDataCenterId(), destHost.getHypervisorType()); + boolean computeSupportsVolumeClone = computeSupportsVolumeClone(destHost.getDataCenterId(), destHost.getHypervisorType()); AsyncCallFuture future = new AsyncCallFuture<>(); - if (storageCanCloneVolume && computeZoneSupportsResign) { - s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and host cluster can perform UUID resigning."); + if (storageCanCloneVolume && computeSupportsVolumeClone) { + s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); TemplateInfo templateOnPrimary = destPrimaryDataStore.getTemplate(srcTemplateInfo.getId()); @@ -1118,16 +1158,22 @@ private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, Primary // We have a template on primary storage. Clone it to new volume. s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); + createManagedVolumeCopyTemplateAsync(volumeInfo, destPrimaryDataStore, srcTemplateInfo, destHost, future); } return future; } - private boolean computeZoneSupportsResign(long zoneId, HypervisorType hypervisorType) { + private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) { + if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) { + return true; + } + return getHost(zoneId, hypervisorType, true) != null; } @@ -1757,7 +1803,17 @@ protected Void registerVolumeCallback(AsyncCallbackDispatcher context = new CreateVolumeContext(null, volume, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().resizeVolumeCallback(caller, context)).setContext(context); - volume.getDataStore().getDriver().resize(volume, caller); + + try { + volume.getDataStore().getDriver().resize(volume, caller); + } catch (Exception e) { + s_logger.debug("Failed to change state to resize", e); + + result.setResult(e.toString()); + + future.complete(result); + } + return future; } diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java index ba873aad124..66b92281efa 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java @@ -25,6 +25,7 @@ import com.cloud.user.AccountDetailVO; import 
com.cloud.user.AccountDetailsDao; import com.cloud.utils.exception.CloudRuntimeException; + import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; import org.springframework.stereotype.Component; @@ -46,9 +47,11 @@ public long getSolidFireAccountId(String csAccountUuid, String storagePoolUuid) long storagePoolId = util.getStoragePoolIdForStoragePoolUuid(storagePoolUuid); AccountDetailVO accountDetail = accountDetailsDao.findDetail(csAccountId, SolidFireUtil.getAccountKey(storagePoolId)); + if (accountDetail == null){ throw new CloudRuntimeException("Unable to find SF account for storage " + storagePoolUuid + " for CS account " + csAccountUuid); } + String sfAccountId = accountDetail.getValue(); return Long.parseLong(sfAccountId); diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java index 427af111ed0..4cbf74aba67 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java @@ -27,14 +27,16 @@ import com.cloud.user.Account; import com.cloud.user.dao.AccountDao; import com.cloud.utils.exception.CloudRuntimeException; + import org.apache.cloudstack.api.response.solidfire.ApiVolumeSnapshotDetailsResponse; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import javax.inject.Inject; import java.util.ArrayList; import java.util.List; +import javax.inject.Inject; + public class SolidFireIntegrationTestUtil { @Inject private AccountDao accountDao; @Inject private ClusterDao clusterDao; @@ -47,15 +49,18 @@ private SolidFireIntegrationTestUtil() {} public long getAccountIdForAccountUuid(String accountUuid) { Account account = accountDao.findByUuid(accountUuid); - if (account == null){ + + if (account == null) { throw new CloudRuntimeException("Unable to find Account for ID: " + accountUuid); } + return account.getAccountId(); } public long getAccountIdForVolumeUuid(String volumeUuid) { VolumeVO volume = volumeDao.findByUuid(volumeUuid); - if (volume == null){ + + if (volume == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + volumeUuid); } @@ -64,15 +69,18 @@ public long getAccountIdForVolumeUuid(String volumeUuid) { public long getAccountIdForSnapshotUuid(String snapshotUuid) { SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid); - if (snapshot == null){ + + if (snapshot == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + snapshotUuid); } + return snapshot.getAccountId(); } public long getClusterIdForClusterUuid(String clusterUuid) { ClusterVO cluster = clusterDao.findByUuid(clusterUuid); - if (cluster == null){ + + if (cluster == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + clusterUuid); } @@ -81,7 +89,8 @@ public long getClusterIdForClusterUuid(String clusterUuid) { public long getStoragePoolIdForStoragePoolUuid(String storagePoolUuid) { StoragePoolVO storagePool = storagePoolDao.findByUuid(storagePoolUuid); - if (storagePool == null){ + + if (storagePool == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + storagePoolUuid); } @@ -90,7 +99,8 @@ public long 
getStoragePoolIdForStoragePoolUuid(String storagePoolUuid) { public String getPathForVolumeUuid(String volumeUuid) { VolumeVO volume = volumeDao.findByUuid(volumeUuid); - if (volume == null){ + + if (volume == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + volumeUuid); } @@ -99,7 +109,8 @@ public String getPathForVolumeUuid(String volumeUuid) { public String getVolume_iScsiName(String volumeUuid) { VolumeVO volume = volumeDao.findByUuid(volumeUuid); - if (volume == null){ + + if (volume == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + volumeUuid); } @@ -108,7 +119,8 @@ public String getVolume_iScsiName(String volumeUuid) { public List getSnapshotDetails(String snapshotUuid) { SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid); - if (snapshot == null){ + + if (snapshot == null) { throw new CloudRuntimeException("Unable to find Volume for ID: " + snapshotUuid); } diff --git a/plugins/api/vmware-sioc/pom.xml b/plugins/api/vmware-sioc/pom.xml new file mode 100644 index 00000000000..2845c7cb3e2 --- /dev/null +++ b/plugins/api/vmware-sioc/pom.xml @@ -0,0 +1,47 @@ + + + 4.0.0 + cloud-plugin-api-vmware-sioc + Apache CloudStack Plugin - API VMware SIOC + + org.apache.cloudstack + cloudstack-plugins + 4.11.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-hypervisor-vmware + ${project.version} + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + -Xmx1024m + + + + + diff --git a/plugins/api/vmware-sioc/resources/META-INF/cloudstack/vmware-sioc/module.properties b/plugins/api/vmware-sioc/resources/META-INF/cloudstack/vmware-sioc/module.properties new file mode 100644 index 00000000000..826e644b2a1 --- /dev/null +++ b/plugins/api/vmware-sioc/resources/META-INF/cloudstack/vmware-sioc/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
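
The SolidFireIntegrationTestUtil changes above are pure formatting, but they highlight several near-identical find-by-UUID-or-throw methods, a number of which say "Unable to find Volume for ID" even when the lookup is for a snapshot, cluster, or storage pool. A generic helper — hypothetical, not part of this diff — would remove both the repetition and that class of copy-paste message:

    import java.util.function.Function;

    final class Lookups {
        // One find-or-throw shared by account, volume, snapshot, cluster and pool lookups.
        static <K, V> V findOrThrow(Function<K, V> finder, K key, String entityName) {
            V value = finder.apply(key);
            if (value == null) {
                throw new IllegalStateException("Unable to find " + entityName + " for ID: " + key);
            }
            return value;
        }
    }

For example, findOrThrow(volumeDao::findByUuid, volumeUuid, "Volume") and findOrThrow(snapshotDao::findByUuid, snapshotUuid, "Snapshot") keep the entity name honest in each message.
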
+name=vmware-sioc +parent=api \ No newline at end of file diff --git a/plugins/api/vmware-sioc/resources/META-INF/cloudstack/vmware-sioc/spring-sioc-context.xml b/plugins/api/vmware-sioc/resources/META-INF/cloudstack/vmware-sioc/spring-sioc-context.xml new file mode 100644 index 00000000000..d87b2f5aedc --- /dev/null +++ b/plugins/api/vmware-sioc/resources/META-INF/cloudstack/vmware-sioc/spring-sioc-context.xml @@ -0,0 +1,33 @@ + + + + + + + diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java new file mode 100644 index 00000000000..d0561aa1158 --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.sioc; + +import javax.inject.Inject; + +import org.apache.log4j.Logger; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.api.response.sioc.ApiUpdateSiocInfoResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.sioc.SiocManager; + +import com.cloud.user.Account; + +@APICommand(name = UpdateSiocInfoCmd.APINAME, description = "Update SIOC info", responseObject = ApiUpdateSiocInfoResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + since = "4.11.0", + authorized = {RoleType.Admin}) +public class UpdateSiocInfoCmd extends BaseCmd { + private static final Logger s_logger = Logger.getLogger(UpdateSiocInfoCmd.class.getName()); + + public static final String APINAME = "updateSiocInfo"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "Zone ID", required = true) + private long zoneId; + + @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "Storage Pool ID", required = true) + private long storagePoolId; + + @Parameter(name = "sharespergb", type = CommandType.INTEGER, description = "Shares per GB", required = true) + private int sharesPerGB; + + @Parameter(name = "limitiopspergb", type = CommandType.INTEGER, description = "Limit IOPS per 
GB", required = true) + private int limitIopsPerGB; + + @Parameter(name = "iopsnotifythreshold", type = CommandType.INTEGER, description = "Notify if IOPS above this value", required = true) + private int iopsNotifyThreshold; + + @Inject private SiocManager manager; + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX; + } + + @Override + public long getEntityOwnerId() { + Account account = CallContext.current().getCallingAccount(); + + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public void execute() { + s_logger.info("'UpdateSiocInfoCmd.execute' method invoked"); + + String msg = "Success"; + + try { + manager.updateSiocInfo(zoneId, storagePoolId, sharesPerGB, limitIopsPerGB, iopsNotifyThreshold); + } + catch (Exception ex) { + msg = ex.getMessage(); + } + + ApiUpdateSiocInfoResponse response = new ApiUpdateSiocInfoResponse(msg); + + response.setResponseName(getCommandName()); + response.setObjectName("apiupdatesiocinfo"); + + setResponseObject(response); + } +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/response/sioc/ApiUpdateSiocInfoResponse.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/response/sioc/ApiUpdateSiocInfoResponse.java new file mode 100644 index 00000000000..eb4783987f5 --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/response/sioc/ApiUpdateSiocInfoResponse.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response.sioc; + +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class ApiUpdateSiocInfoResponse extends BaseResponse { + @SerializedName("msg") + @Param(description = "The return message from the operation ('Success' if successful)") + private String _msg; + + public ApiUpdateSiocInfoResponse(String msg) { + _msg = msg; + } +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/sioc/ApiSiocService.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/sioc/ApiSiocService.java new file mode 100644 index 00000000000..7b622dc25be --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/sioc/ApiSiocService.java @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.sioc; + +import com.cloud.utils.component.PluggableService; + +public interface ApiSiocService extends PluggableService { +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/sioc/ApiSiocServiceImpl.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/sioc/ApiSiocServiceImpl.java new file mode 100644 index 00000000000..1a91fd9e20d --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/api/sioc/ApiSiocServiceImpl.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.sioc; + +import java.util.List; +import java.util.ArrayList; + +import org.apache.cloudstack.api.command.admin.sioc.UpdateSiocInfoCmd; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.AdapterBase; + +@Component +public class ApiSiocServiceImpl extends AdapterBase implements ApiSiocService { + @Override + public List> getCommands() { + List> cmdList = new ArrayList>(); + + cmdList.add(UpdateSiocInfoCmd.class); + + return cmdList; + } +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/sioc/SiocManager.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/sioc/SiocManager.java new file mode 100644 index 00000000000..1bbfbc8f041 --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/sioc/SiocManager.java @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.sioc; + +public interface SiocManager { + void updateSiocInfo(long zoneId, long storagePoolId, int sharesPerGB, int limitIopsPerGB, int iopsNotifyThreshold) throws Exception; +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/sioc/SiocManagerImpl.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/sioc/SiocManagerImpl.java new file mode 100644 index 00000000000..966c83722ec --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/sioc/SiocManagerImpl.java @@ -0,0 +1,463 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.sioc; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.util.LoginInfo; +import org.apache.cloudstack.util.vmware.VMwareUtil; +import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.hypervisor.vmware.VmwareDatacenterVO; +import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMapVO; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao; +import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.db.GlobalLock; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.SharesInfo; +import com.vmware.vim25.SharesLevel; +import com.vmware.vim25.StorageIOAllocationInfo; +import com.vmware.vim25.VirtualDevice; +import com.vmware.vim25.VirtualDeviceConfigSpec; +import com.vmware.vim25.VirtualDeviceConfigSpecOperation; +import com.vmware.vim25.VirtualDisk; +import 
com.vmware.vim25.VirtualDeviceFileBackingInfo; +import com.vmware.vim25.VirtualMachineConfigInfo; +import com.vmware.vim25.VirtualMachineConfigSpec; + +@Component +public class SiocManagerImpl implements SiocManager { + private static final Logger LOGGER = Logger.getLogger(SiocManagerImpl.class); + private static final int LOCK_TIME_IN_SECONDS = 3; + private static final int ONE_GB_IN_BYTES = 1000000000; + private static final int LOWEST_SHARES_PER_VIRTUAL_DISK = 2000; // We want this to be greater than 1,000, which is the VMware default value. + private static final int HIGHEST_SHARES_PER_VIRTUAL_DISK = 4000; // VMware limit + private static final int LOWEST_LIMIT_IOPS_PER_VIRTUAL_DISK = 16; // VMware limit + private static final int HIGHEST_LIMIT_IOPS_PER_VIRTUAL_DISK = 2147483647; // VMware limit + + @Inject private DataCenterDao zoneDao; + @Inject private DiskOfferingDao diskOfferingDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject private VMInstanceDao vmInstanceDao; + @Inject private VmwareDatacenterDao vmwareDcDao; + @Inject private VmwareDatacenterZoneMapDao vmwareDcZoneMapDao; + @Inject private VolumeDao volumeDao; + + @Override + public void updateSiocInfo(long zoneId, long storagePoolId, int sharesPerGB, int limitIopsPerGB, int iopsNotifyThreshold) throws Exception { + LOGGER.info("'SiocManagerImpl.updateSiocInfo(long, long, int, int, int)' method invoked"); + + DataCenterVO zone = zoneDao.findById(zoneId); + + if (zone == null) { + throw new Exception("Error: No zone could be located for the following zone ID: " + zoneId + "."); + } + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + if (storagePool == null) { + throw new Exception("Error: No storage pool could be located for the following pool ID: " + storagePoolId + "."); + } + + if (storagePool.getDataCenterId() != zoneId) { + throw new Exception("Error: Storage pool '" + storagePool.getName() + "' is not in zone ID " + zoneId + "."); + } + + if (!storagePool.getPoolType().equals(StoragePoolType.VMFS)) { + throw new Exception("Error: Storage pool '" + storagePool.getName() + "' does not represent a VMFS datastore."); + } + + String lockName = zone.getUuid() + "-" + storagePool.getUuid(); + GlobalLock lock = GlobalLock.getInternLock(lockName); + + if (!lock.lock(LOCK_TIME_IN_SECONDS)) { + throw new Exception("Busy: The system is already processing this request."); + } + + VMwareUtil.VMwareConnection connection = null; + + try { + connection = VMwareUtil.getVMwareConnection(getLoginInfo(zoneId)); + + Map nameToVm = VMwareUtil.getVms(connection); + + List allTasks = new ArrayList<>(); + + int limitIopsTotal = 0; + + List volumes = volumeDao.findByPoolId(storagePoolId, null); + + if (volumes != null && volumes.size() > 0) { + Set instanceIds = new HashSet<>(); + + for (VolumeVO volume : volumes) { + Long instanceId = volume.getInstanceId(); + + if (instanceId != null) { + instanceIds.add(instanceId); + } + } + + for (Long instanceId : instanceIds) { + ResultWrapper resultWrapper = updateSiocInfo(connection, nameToVm, instanceId, storagePool, sharesPerGB, limitIopsPerGB); + + limitIopsTotal += resultWrapper.getLimitIopsTotal(); + + allTasks.addAll(resultWrapper.getTasks()); + } + } + /* + Set vmNames = nameToVm.keySet(); + + for (String vmName : vmNames) { + // If the VM's name doesn't start with "i-", then it should be a worker VM (which is not stored in the CloudStack datastore). 
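For orientation, the sizing helpers defined further down in this file (getNewSharesBasedOnVolumeSize and getNewLimitIopsBasedOnVolumeSize) derive both per-disk values from the volume size in decimal gigabytes (ONE_GB_IN_BYTES = 10^9) and clamp them to the VMware bounds declared above. A worked example with hypothetical inputs:

    // Hypothetical inputs: 100 GB volume, sharesPerGB = 10, limitIopsPerGB = 5.
    // shares    = LOWEST_SHARES_PER_VIRTUAL_DISK + (sharesPerGB * sizeInGB)
    //           = 2000 + (10 * 100) = 3000    -> inside [2000, 4000], used as-is
    // limitIops = limitIopsPerGB * sizeInGB
    //           = 5 * 100 = 500               -> inside [16, 2147483647], used as-is
    // For a 1000 GB volume the raw shares value, 2000 + 10000 = 12000, would be
    // clamped down to HIGHEST_SHARES_PER_VIRTUAL_DISK (4000).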
+ if (!vmName.startsWith("i-")) { + ResultWrapper resultWrapper = updateSiocInfoForWorkerVM(connection, nameToVm.get(vmName), + getDatastoreName(storagePool.getPath()), limitIopsPerGB); + + limitIopsTotal += resultWrapper.getLimitIopsTotal(); + + allTasks.addAll(resultWrapper.getTasks()); + } + } + */ + for (ManagedObjectReference task : allTasks) { + VMwareUtil.waitForTask(connection, task); + } + + if (limitIopsTotal > iopsNotifyThreshold) { + throw new Exception("Warning: Total number of IOPS: " + limitIopsTotal + "; IOPS notify threshold: " + iopsNotifyThreshold); + } + } + finally { + VMwareUtil.closeVMwareConnection(connection); + + lock.unlock(); + lock.releaseRef(); + } + } + + private ResultWrapper updateSiocInfo(VMwareUtil.VMwareConnection connection, Map nameToVm, Long instanceId, + StoragePoolVO storagePool, int sharesPerGB, int limitIopsPerGB) throws Exception { + int limitIopsTotal = 0; + List tasks = new ArrayList<>(); + + VMInstanceVO vmInstance = vmInstanceDao.findById(instanceId); + + if (vmInstance == null) { + String errMsg = "Error: The VM with ID " + instanceId + " could not be located."; + + throw new Exception(errMsg); + } + + String vmName = vmInstance.getInstanceName(); + + ManagedObjectReference morVm = nameToVm.get(vmName); + + if (morVm == null) { + String errMsg = "Error: The VM with ID " + instanceId + " could not be located (ManagedObjectReference)."; + + throw new Exception(errMsg); + } + + VirtualMachineConfigInfo vmci = (VirtualMachineConfigInfo)VMwareUtil.getEntityProps(connection, morVm, + new String[] { "config" }).get("config"); + List devices = vmci.getHardware().getDevice(); + + for (VirtualDevice device : devices) { + if (device instanceof VirtualDisk) { + VirtualDisk disk = (VirtualDisk)device; + + VolumeVO volumeVO = getVolumeFromVirtualDisk(vmInstance, storagePool.getId(), devices, disk); + + if (volumeVO != null) { + boolean diskUpdated = false; + + StorageIOAllocationInfo sioai = disk.getStorageIOAllocation(); + + SharesInfo sharesInfo = sioai.getShares(); + + int currentShares = sharesInfo.getShares(); + int newShares = getNewSharesBasedOnVolumeSize(volumeVO, sharesPerGB); + + if (currentShares != newShares) { + sharesInfo.setLevel(SharesLevel.CUSTOM); + sharesInfo.setShares(newShares); + + diskUpdated = true; + } + + long currentLimitIops = sioai.getLimit() != null ? 
sioai.getLimit() : Long.MIN_VALUE; + long newLimitIops = getNewLimitIopsBasedOnVolumeSize(volumeVO, limitIopsPerGB); + + limitIopsTotal += newLimitIops; + + if (currentLimitIops != newLimitIops) { + sioai.setLimit(newLimitIops); + + diskUpdated = true; + } + + if (diskUpdated) { + VirtualDeviceConfigSpec vdcs = new VirtualDeviceConfigSpec(); + + vdcs.setDevice(disk); + vdcs.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + + VirtualMachineConfigSpec vmcs = new VirtualMachineConfigSpec(); + + vmcs.getDeviceChange().add(vdcs); + + try { + ManagedObjectReference task = VMwareUtil.reconfigureVm(connection, morVm, vmcs); + + tasks.add(task); + + LOGGER.info(getInfoMsg(volumeVO, newShares, newLimitIops)); + } catch (Exception ex) { + throw new Exception("Error: " + ex.getMessage()); + } + } + } + } + } + + return new ResultWrapper(limitIopsTotal, tasks); + } + + private String getDatastoreName(String path) throws Exception { + String searchString = "/"; + + int lastIndexOf = path.lastIndexOf(searchString); + + if (lastIndexOf == -1) { + throw new Exception("Error: Invalid datastore path"); + } + + return path.substring(lastIndexOf + searchString.length()); + } + + private ResultWrapper updateSiocInfoForWorkerVM(VMwareUtil.VMwareConnection connection, ManagedObjectReference morVm, String datastoreName, + int limitIopsPerGB) throws Exception { + int limitIopsTotal = 0; + List tasks = new ArrayList<>(); + + VirtualMachineConfigInfo vmci = (VirtualMachineConfigInfo)VMwareUtil.getEntityProps(connection, morVm, + new String[] { "config" }).get("config"); + List devices = vmci.getHardware().getDevice(); + + for (VirtualDevice device : devices) { + if (device instanceof VirtualDisk) { + VirtualDisk disk = (VirtualDisk)device; + + if (disk.getBacking() instanceof VirtualDeviceFileBackingInfo) { + VirtualDeviceFileBackingInfo backingInfo = (VirtualDeviceFileBackingInfo)disk.getBacking(); + + if (backingInfo.getFileName().contains(datastoreName)) { + boolean diskUpdated = false; + + StorageIOAllocationInfo sioai = disk.getStorageIOAllocation(); + + long currentLimitIops = sioai.getLimit() != null ? sioai.getLimit() : Long.MIN_VALUE; + long newLimitIops = getNewLimitIopsBasedOnVolumeSize(disk.getCapacityInBytes(), limitIopsPerGB); + + limitIopsTotal += newLimitIops; + + if (currentLimitIops != newLimitIops) { + sioai.setLimit(newLimitIops); + + diskUpdated = true; + } + + if (diskUpdated) { + VirtualDeviceConfigSpec vdcs = new VirtualDeviceConfigSpec(); + + vdcs.setDevice(disk); + vdcs.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + + VirtualMachineConfigSpec vmcs = new VirtualMachineConfigSpec(); + + vmcs.getDeviceChange().add(vdcs); + + try { + ManagedObjectReference task = VMwareUtil.reconfigureVm(connection, morVm, vmcs); + + tasks.add(task); + + LOGGER.info(getInfoMsgForWorkerVm(newLimitIops)); + } catch (Exception ex) { + throw new Exception("Error: " + ex.getMessage()); + } + } + } + } + } + } + + return new ResultWrapper(limitIopsTotal, tasks); + } + + private String getInfoMsg(Volume volume, Integer newShares, Long newLimitIops) { + String msgPrefix = "VMware SIOC: Volume = " + volume.getName(); + + String msgNewShares = newShares != null ? "; New Shares = " + newShares : ""; + + String msgNewLimitIops = newLimitIops != null ? 
"; New Limit IOPS = " + newLimitIops : ""; + + return msgPrefix + msgNewShares + msgNewLimitIops; + } + + private String getInfoMsgForWorkerVm(Long newLimitIops) { + return "VMware SIOC: Worker VM's Limit IOPS set to " + newLimitIops; + } + + private VolumeVO getVolumeFromVirtualDisk(VMInstanceVO vmInstance, long storagePoolId, List allDevices, + VirtualDisk disk) throws Exception { + List volumes = volumeDao.findByInstance(vmInstance.getId()); + + if (volumes == null || volumes.size() == 0) { + String errMsg = "Error: The VMware virtual disk '" + disk + "' could not be mapped to a CloudStack volume. " + + "There were no volumes for the VM with the following ID: " + vmInstance.getId() + "."; + + throw new Exception(errMsg); + } + + VirtualMachineDiskInfoBuilder diskInfoBuilder = VMwareUtil.getDiskInfoBuilder(allDevices); + + for (VolumeVO volume : volumes) { + Long poolId = volume.getPoolId(); + + if (poolId != null && poolId == storagePoolId) { + StoragePoolVO storagePool = storagePoolDao.findById(poolId); + String path = storagePool.getPath(); + String charToSearchFor = "/"; + int index = path.lastIndexOf(charToSearchFor) + charToSearchFor.length(); + String datastoreName = path.substring(index); + VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(volume.getPath(), datastoreName); + + if (diskInfo != null) { + String deviceBusName = VMwareUtil.getDeviceBusName(allDevices, disk); + + if (deviceBusName.equals(diskInfo.getDiskDeviceBusName())) { + return volume; + } + } + } + } + + return null; + } + + private int getNewSharesBasedOnVolumeSize(VolumeVO volumeVO, int sharesPerGB) { + long volumeSizeInBytes = getVolumeSizeInBytes(volumeVO); + + double sizeInGB = volumeSizeInBytes / (double)ONE_GB_IN_BYTES; + + int shares = LOWEST_SHARES_PER_VIRTUAL_DISK + ((int)(sharesPerGB * sizeInGB)); + + return getAdjustedShares(shares); + } + + private int getAdjustedShares(int shares) { + shares = Math.max(shares, LOWEST_SHARES_PER_VIRTUAL_DISK); + shares = Math.min(shares, HIGHEST_SHARES_PER_VIRTUAL_DISK); + + return shares; + } + + private long getNewLimitIopsBasedOnVolumeSize(VolumeVO volumeVO, int limitIopsPerGB) { + long volumeSizeInBytes = getVolumeSizeInBytes(volumeVO); + + return getNewLimitIopsBasedOnVolumeSize(volumeSizeInBytes, limitIopsPerGB); + } + + private long getNewLimitIopsBasedOnVolumeSize(Long volumeSizeInBytes, int limitIopsPerGB) { + if (volumeSizeInBytes == null) { + volumeSizeInBytes = (long)ONE_GB_IN_BYTES; + } + + double sizeInGB = volumeSizeInBytes / (double)ONE_GB_IN_BYTES; + + long limitIops = (long)(limitIopsPerGB * sizeInGB); + + return getAdjustedLimitIops(limitIops); + } + + private long getAdjustedLimitIops(long limitIops) { + limitIops = Math.max(limitIops, LOWEST_LIMIT_IOPS_PER_VIRTUAL_DISK); + limitIops = Math.min(limitIops, HIGHEST_LIMIT_IOPS_PER_VIRTUAL_DISK); + + return limitIops; + } + + private long getVolumeSizeInBytes(VolumeVO volumeVO) { + return volumeVO.getSize() != null && volumeVO.getSize() > ONE_GB_IN_BYTES ? 
volumeVO.getSize() : ONE_GB_IN_BYTES; + } + + private LoginInfo getLoginInfo(long zoneId) { + VmwareDatacenterZoneMapVO vmwareDcZoneMap = vmwareDcZoneMapDao.findByZoneId(zoneId); + Long associatedVmwareDcId = vmwareDcZoneMap.getVmwareDcId(); + VmwareDatacenterVO associatedVmwareDc = vmwareDcDao.findById(associatedVmwareDcId); + + String host = associatedVmwareDc.getVcenterHost(); + String username = associatedVmwareDc.getUser(); + String password = associatedVmwareDc.getPassword(); + + return new LoginInfo(host, username, password); + } +} + +class ResultWrapper { + private int limitIopsTotal; + private List tasks; + + ResultWrapper(int limitIopsTotal, List tasks) { + this.limitIopsTotal = limitIopsTotal; + this.tasks = tasks; + } + + int getLimitIopsTotal() { + return limitIopsTotal; + } + + List getTasks() { + return tasks; + } +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/util/LoginInfo.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/util/LoginInfo.java new file mode 100644 index 00000000000..192996698c9 --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/util/LoginInfo.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.util; + +public class LoginInfo { + private final String _host; + private final String _username; + private final String _password; + + public LoginInfo(String host, String username, String password) { + _host = host; + _username = username; + _password = password; + } + + public String getHost() { + return _host; + } + + public String getUsername() { + return _username; + } + + public String getPassword() { + return _password; + } +} diff --git a/plugins/api/vmware-sioc/src/org/apache/cloudstack/util/vmware/VMwareUtil.java b/plugins/api/vmware-sioc/src/org/apache/cloudstack/util/vmware/VMwareUtil.java new file mode 100644 index 00000000000..209945fa471 --- /dev/null +++ b/plugins/api/vmware-sioc/src/org/apache/cloudstack/util/vmware/VMwareUtil.java @@ -0,0 +1,570 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.util.vmware; + +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSessionContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; +import javax.xml.ws.BindingProvider; +import javax.xml.ws.WebServiceException; + +import org.apache.cloudstack.util.LoginInfo; +import org.apache.log4j.Logger; + +import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; +import com.vmware.vim25.DynamicProperty; +import com.vmware.vim25.InvalidCollectorVersionFaultMsg; +import com.vmware.vim25.InvalidPropertyFaultMsg; +import com.vmware.vim25.LocalizedMethodFault; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.ObjectContent; +import com.vmware.vim25.ObjectSpec; +import com.vmware.vim25.ObjectUpdate; +import com.vmware.vim25.ObjectUpdateKind; +import com.vmware.vim25.PropertyChange; +import com.vmware.vim25.PropertyChangeOp; +import com.vmware.vim25.PropertyFilterSpec; +import com.vmware.vim25.PropertyFilterUpdate; +import com.vmware.vim25.PropertySpec; +import com.vmware.vim25.RetrieveOptions; +import com.vmware.vim25.RetrieveResult; +import com.vmware.vim25.RuntimeFaultFaultMsg; +import com.vmware.vim25.SelectionSpec; +import com.vmware.vim25.ServiceContent; +import com.vmware.vim25.TaskInfoState; +import com.vmware.vim25.TraversalSpec; +import com.vmware.vim25.UpdateSet; +import com.vmware.vim25.VimPortType; +import com.vmware.vim25.VimService; +import com.vmware.vim25.VirtualDevice; +import com.vmware.vim25.VirtualDeviceBackingInfo; +import com.vmware.vim25.VirtualDisk; +import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo; +import com.vmware.vim25.VirtualIDEController; +import com.vmware.vim25.VirtualMachineConfigSpec; +import com.vmware.vim25.VirtualSCSIController; + +public class VMwareUtil { + private static final Logger s_logger = Logger.getLogger(VMwareUtil.class); + + private VMwareUtil() {} + + public static class VMwareConnection { + private VimPortType _vimPortType; + private ServiceContent _serviceContent; + + VMwareConnection(VimPortType vimPortType, ServiceContent serviceContent) { + _vimPortType = vimPortType; + _serviceContent = serviceContent; + } + + VimPortType getVimPortType() { + return _vimPortType; + } + + ServiceContent getServiceContent() { + return _serviceContent; + } + } + + public static VMwareConnection getVMwareConnection(LoginInfo loginInfo) throws Exception { + trustAllHttpsCertificates(); + + HostnameVerifier hv = new HostnameVerifier() { + @Override + public boolean verify(String urlHostName, SSLSession session) { + return true; + } + }; + + HttpsURLConnection.setDefaultHostnameVerifier(hv); + + ManagedObjectReference serviceInstanceRef = new ManagedObjectReference(); + + final String serviceInstanceName = "ServiceInstance"; + + serviceInstanceRef.setType(serviceInstanceName); + serviceInstanceRef.setValue(serviceInstanceName); + + VimService vimService = new VimService(); + + VimPortType vimPortType = vimService.getVimPort(); + + Map ctxt = ((BindingProvider)vimPortType).getRequestContext(); + + 
ctxt.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, "https://" + loginInfo.getHost() + "/sdk"); + ctxt.put(BindingProvider.SESSION_MAINTAIN_PROPERTY, true); + + ServiceContent serviceContent = vimPortType.retrieveServiceContent(serviceInstanceRef); + + vimPortType.login(serviceContent.getSessionManager(), loginInfo.getUsername(), loginInfo.getPassword(), null); + + return new VMwareConnection(vimPortType, serviceContent); + } + + public static void closeVMwareConnection(VMwareConnection connection) throws Exception { + if (connection != null) { + connection.getVimPortType().logout(connection.getServiceContent().getSessionManager()); + } + } + + public static Map<String, ManagedObjectReference> getVms(VMwareConnection connection) throws Exception { + Map<String, ManagedObjectReference> nameToVm = new HashMap<>(); + + ManagedObjectReference rootFolder = connection.getServiceContent().getRootFolder(); + + TraversalSpec tSpec = getVMTraversalSpec(); + + PropertySpec propertySpec = new PropertySpec(); + + propertySpec.setAll(Boolean.FALSE); + propertySpec.getPathSet().add("name"); + propertySpec.setType("VirtualMachine"); + + ObjectSpec objectSpec = new ObjectSpec(); + + objectSpec.setObj(rootFolder); + objectSpec.setSkip(Boolean.TRUE); + objectSpec.getSelectSet().add(tSpec); + + PropertyFilterSpec propertyFilterSpec = new PropertyFilterSpec(); + + propertyFilterSpec.getPropSet().add(propertySpec); + propertyFilterSpec.getObjectSet().add(objectSpec); + + List<PropertyFilterSpec> lstPfs = new ArrayList<>(1); + + lstPfs.add(propertyFilterSpec); + + VimPortType vimPortType = connection.getVimPortType(); + ManagedObjectReference propertyCollector = connection.getServiceContent().getPropertyCollector(); + + List<ObjectContent> lstObjectContent = retrievePropertiesAllObjects(lstPfs, vimPortType, propertyCollector); + + if (lstObjectContent != null) { + for (ObjectContent oc : lstObjectContent) { + ManagedObjectReference mor = oc.getObj(); + List<DynamicProperty> dps = oc.getPropSet(); + String vmName = null; + + if (dps != null) { + for (DynamicProperty dp : dps) { + vmName = (String)dp.getVal(); + } + } + + if (vmName != null) { + nameToVm.put(vmName, mor); + } + } + } + + return nameToVm; + } + + public static Map<String, Object> getEntityProps(VMwareConnection connection, ManagedObjectReference entityMor, String[] props) + throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg { + Map<String, Object> retVal = new HashMap<>(); + + PropertySpec propertySpec = new PropertySpec(); + + propertySpec.setAll(Boolean.FALSE); + propertySpec.setType(entityMor.getType()); + propertySpec.getPathSet().addAll(Arrays.asList(props)); + + ObjectSpec objectSpec = new ObjectSpec(); + + objectSpec.setObj(entityMor); + + // Create PropertyFilterSpec using the PropertySpec and ObjectSpec created above. + PropertyFilterSpec propertyFilterSpec = new PropertyFilterSpec(); + + propertyFilterSpec.getPropSet().add(propertySpec); + propertyFilterSpec.getObjectSet().add(objectSpec); + + List<PropertyFilterSpec> propertyFilterSpecs = new ArrayList<>(); + + propertyFilterSpecs.add(propertyFilterSpec); + + RetrieveResult rslts = connection.getVimPortType().retrievePropertiesEx(connection.getServiceContent().getPropertyCollector(), + propertyFilterSpecs, new RetrieveOptions()); + List<ObjectContent> listobjcontent = new ArrayList<>(); + + if (rslts != null && rslts.getObjects() != null && !rslts.getObjects().isEmpty()) { + listobjcontent.addAll(rslts.getObjects()); + } + + String token = null; + + if (rslts != null && rslts.getToken() != null) { + token = rslts.getToken(); + } + + while (token != null && !token.isEmpty()) { + rslts = connection.getVimPortType().continueRetrievePropertiesEx(connection.getServiceContent().getPropertyCollector(), + token); + + token = null; + + if (rslts != null) { + token = rslts.getToken(); + + if (rslts.getObjects() != null && !rslts.getObjects().isEmpty()) { + listobjcontent.addAll(rslts.getObjects()); + } + } + } + + for (ObjectContent oc : listobjcontent) { + List<DynamicProperty> dps = oc.getPropSet(); + + if (dps != null) { + for (DynamicProperty dp : dps) { + retVal.put(dp.getName(), dp.getVal()); + } + } + } + + return retVal; + } + + public static ManagedObjectReference reconfigureVm(VMwareConnection connection, ManagedObjectReference morVm, + VirtualMachineConfigSpec vmcs) throws Exception { + return connection.getVimPortType().reconfigVMTask(morVm, vmcs); + } + + public static VirtualMachineDiskInfoBuilder getDiskInfoBuilder(List<VirtualDevice> devices) throws Exception { + VirtualMachineDiskInfoBuilder builder = new VirtualMachineDiskInfoBuilder(); + + if (devices != null) { + for (VirtualDevice device : devices) { + if (device instanceof VirtualDisk) { + VirtualDisk virtualDisk = (VirtualDisk)device; + VirtualDeviceBackingInfo backingInfo = virtualDisk.getBacking(); + + if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { + VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; + + String deviceBusName = VMwareUtil.getDeviceBusName(devices, virtualDisk); + + while (diskBackingInfo != null) { + builder.addDisk(deviceBusName, diskBackingInfo.getFileName()); + + diskBackingInfo = diskBackingInfo.getParent(); + } + } + } + } + } + + return builder; + } + + public static String getDeviceBusName(List<VirtualDevice> allDevices, VirtualDisk disk) throws Exception { + for (VirtualDevice device : allDevices) { + if (device.getKey() == disk.getControllerKey()) { + if (device instanceof VirtualIDEController) { + return String.format("ide%d:%d", ((VirtualIDEController)device).getBusNumber(), disk.getUnitNumber()); + } else if (device instanceof VirtualSCSIController) { + return String.format("scsi%d:%d", ((VirtualSCSIController)device).getBusNumber(), disk.getUnitNumber()); + } else { + throw new Exception("The device controller is not supported."); + } + } + } + + throw new Exception("The device controller could not be located."); + } + + public static boolean waitForTask(VMwareConnection connection, ManagedObjectReference task) throws Exception { + try { + Object[] result = waitForValues(connection, task, new String[] { "info.state", "info.error" }, new String[] { "state" }, + new Object[][] { new Object[] { TaskInfoState.SUCCESS, TaskInfoState.ERROR } }); + + if (result[0].equals(TaskInfoState.SUCCESS)) { + return true; + } + + if (result[1] instanceof LocalizedMethodFault) { + throw new
Exception(((LocalizedMethodFault)result[1]).getLocalizedMessage()); + } + } catch (WebServiceException we) { + s_logger.debug("Cancelling vCenter task because the task failed with the following error: " + we.getLocalizedMessage()); + + connection.getVimPortType().cancelTask(task); + + throw new Exception("The vCenter task failed due to the following error: " + we.getLocalizedMessage()); + } + + return false; + } + + private static Object[] waitForValues(VMwareConnection connection, ManagedObjectReference morObj, String[] filterProps, + String[] endWaitProps, Object[][] expectedVals) throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg, + InvalidCollectorVersionFaultMsg { + String version = ""; + Object[] endVals = new Object[endWaitProps.length]; + Object[] filterVals = new Object[filterProps.length]; + + PropertyFilterSpec spec = new PropertyFilterSpec(); + + ObjectSpec oSpec = new ObjectSpec(); + + oSpec.setObj(morObj); + oSpec.setSkip(Boolean.FALSE); + + spec.getObjectSet().add(oSpec); + + PropertySpec pSpec = new PropertySpec(); + + pSpec.getPathSet().addAll(Arrays.asList(filterProps)); + pSpec.setType(morObj.getType()); + + spec.getPropSet().add(pSpec); + + ManagedObjectReference propertyCollector = connection.getServiceContent().getPropertyCollector(); + ManagedObjectReference filterSpecRef = connection.getVimPortType().createFilter(propertyCollector, spec, true); + + boolean reached = false; + + UpdateSet updateSet; + List lstPropertyFilterUpdates; + List lstObjectUpdates; + List lstPropertyChanges; + + while (!reached) { + updateSet = connection.getVimPortType().waitForUpdates(propertyCollector, version); + + if (updateSet == null || updateSet.getFilterSet() == null) { + continue; + } + + version = updateSet.getVersion(); + + lstPropertyFilterUpdates = updateSet.getFilterSet(); + + for (PropertyFilterUpdate propertyFilterUpdate : lstPropertyFilterUpdates) { + lstObjectUpdates = propertyFilterUpdate.getObjectSet(); + + for (ObjectUpdate objUpdate : lstObjectUpdates) { + if (objUpdate.getKind() == ObjectUpdateKind.MODIFY || objUpdate.getKind() == ObjectUpdateKind.ENTER || + objUpdate.getKind() == ObjectUpdateKind.LEAVE) { + lstPropertyChanges = objUpdate.getChangeSet(); + + for (PropertyChange propchg : lstPropertyChanges) { + updateValues(endWaitProps, endVals, propchg); + updateValues(filterProps, filterVals, propchg); + } + } + } + } + + Object expectedValue; + + // Check if the expected values have been reached and exit the loop if done. + // Also, exit the WaitForUpdates loop if this is the case. + for (int chgi = 0; chgi < endVals.length && !reached; chgi++) { + for (int vali = 0; vali < expectedVals[chgi].length && !reached; vali++) { + expectedValue = expectedVals[chgi][vali]; + + reached = expectedValue.equals(endVals[chgi]) || reached; + } + } + } + + // Destroy the filter when we are done. 
+ connection.getVimPortType().destroyPropertyFilter(filterSpecRef); + + return filterVals; + } + + private static void updateValues(String[] props, Object[] vals, PropertyChange propertyChange) { + for (int findi = 0; findi < props.length; findi++) { + if (propertyChange.getName().lastIndexOf(props[findi]) >= 0) { + if (propertyChange.getOp() == PropertyChangeOp.REMOVE) { + vals[findi] = ""; + } else { + vals[findi] = propertyChange.getVal(); + } + } + } + } + + private static List retrievePropertiesAllObjects(List lstPfs, + VimPortType vimPortType, ManagedObjectReference propCollectorRef) throws Exception { + List lstObjectContent = new ArrayList<>(); + + RetrieveOptions retrieveOptions = new RetrieveOptions(); + + RetrieveResult rslts = vimPortType.retrievePropertiesEx(propCollectorRef, lstPfs, retrieveOptions); + + if (rslts != null && rslts.getObjects() != null && rslts.getObjects().size() > 0) { + List lstOc = new ArrayList<>(); + + for (ObjectContent oc : rslts.getObjects()) { + lstOc.add(oc); + } + + lstObjectContent.addAll(lstOc); + } + + String token = null; + + if (rslts != null && rslts.getToken() != null) { + token = rslts.getToken(); + } + + while (token != null && !token.isEmpty()) { + rslts = vimPortType.continueRetrievePropertiesEx(propCollectorRef, token); + token = null; + + if (rslts != null) { + token = rslts.getToken(); + + if (rslts.getObjects() != null && rslts.getObjects().size() > 0) { + List lstOc = new ArrayList<>(); + + for (ObjectContent oc : rslts.getObjects()) { + lstOc.add(oc); + } + + lstObjectContent.addAll(lstOc); + } + } + } + + return lstObjectContent; + } + + private static TraversalSpec getVMTraversalSpec() { + // Create a TraversalSpec that starts from the 'root' objects + // and traverses the inventory tree to get to the VirtualMachines. 
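Taken together, getVMwareConnection, getVms, getEntityProps and closeVMwareConnection form the small read API this plugin relies on. A minimal usage sketch, mirroring how SiocManagerImpl above drives it (the instance name is hypothetical):

    VMwareUtil.VMwareConnection connection = VMwareUtil.getVMwareConnection(loginInfo);
    try {
        Map<String, ManagedObjectReference> nameToVm = VMwareUtil.getVms(connection);
        ManagedObjectReference morVm = nameToVm.get("i-2-17-VM"); // hypothetical instance name
        VirtualMachineConfigInfo vmci = (VirtualMachineConfigInfo)VMwareUtil.getEntityProps(connection, morVm,
                new String[] { "config" }).get("config");
        // vmci.getHardware().getDevice() then yields the VirtualDisks to reconfigure.
    } finally {
        VMwareUtil.closeVMwareConnection(connection);
    }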
+ // Build the traversal specs bottoms up + + // TraversalSpec to get to the VM in a vApp + TraversalSpec vAppToVM = new TraversalSpec(); + + vAppToVM.setName("vAppToVM"); + vAppToVM.setType("VirtualApp"); + vAppToVM.setPath("vm"); + + // TraversalSpec for vApp to vApp + TraversalSpec vAppToVApp = new TraversalSpec(); + + vAppToVApp.setName("vAppToVApp"); + vAppToVApp.setType("VirtualApp"); + vAppToVApp.setPath("resourcePool"); + + // SelectionSpec for vApp-to-vApp recursion + SelectionSpec vAppRecursion = new SelectionSpec(); + + vAppRecursion.setName("vAppToVApp"); + + // SelectionSpec to get to a VM in the vApp + SelectionSpec vmInVApp = new SelectionSpec(); + + vmInVApp.setName("vAppToVM"); + + // SelectionSpec for both vApp to vApp and vApp to VM + List vAppToVMSS = new ArrayList<>(); + + vAppToVMSS.add(vAppRecursion); + vAppToVMSS.add(vmInVApp); + + vAppToVApp.getSelectSet().addAll(vAppToVMSS); + + // This SelectionSpec is used for recursion for Folder recursion + SelectionSpec sSpec = new SelectionSpec(); + + sSpec.setName("VisitFolders"); + + // Traversal to get to the vmFolder from DataCenter + TraversalSpec dataCenterToVMFolder = new TraversalSpec(); + + dataCenterToVMFolder.setName("DataCenterToVMFolder"); + dataCenterToVMFolder.setType("Datacenter"); + dataCenterToVMFolder.setPath("vmFolder"); + dataCenterToVMFolder.setSkip(false); + + dataCenterToVMFolder.getSelectSet().add(sSpec); + + // TraversalSpec to get to the DataCenter from rootFolder + TraversalSpec traversalSpec = new TraversalSpec(); + + traversalSpec.setName("VisitFolders"); + traversalSpec.setType("Folder"); + traversalSpec.setPath("childEntity"); + traversalSpec.setSkip(false); + + List sSpecArr = new ArrayList<>(); + + sSpecArr.add(sSpec); + sSpecArr.add(dataCenterToVMFolder); + sSpecArr.add(vAppToVM); + sSpecArr.add(vAppToVApp); + + traversalSpec.getSelectSet().addAll(sSpecArr); + + return traversalSpec; + } + + private static void trustAllHttpsCertificates() throws Exception { + // Create a trust manager that does not validate certificate chains: + TrustManager[] trustAllCerts = new TrustManager[1]; + + TrustManager tm = new TrustAllTrustManager(); + + trustAllCerts[0] = tm; + + SSLContext sc = SSLContext.getInstance("SSL"); + + SSLSessionContext sslsc = sc.getServerSessionContext(); + + sslsc.setSessionTimeout(0); + + sc.init(null, trustAllCerts, null); + + HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); + } + + private static class TrustAllTrustManager implements TrustManager, X509TrustManager { + @Override + public X509Certificate[] getAcceptedIssuers() { + return null; + } + + @Override + public void checkServerTrusted(X509Certificate[] certs, String authType) throws CertificateException { + } + + @Override + public void checkClientTrusted(X509Certificate[] certs, String authType) throws CertificateException { + } + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 64285841ff7..9b7fb2ea013 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -2367,6 +2367,10 @@ private void createVif(final LibvirtVMDef vm, final NicTO nic, final String nicA vm.getDevices().addDevice(getVifDriver(nic.getType(), nic.getName()).plug(nic, vm.getPlatformEmulator(), nicAdapter)); } + public boolean 
cleanupDisk(Map volumeToDisconnect) { + return _storagePoolMgr.disconnectPhysicalDisk(volumeToDisconnect); + } + public boolean cleanupDisk(final DiskDef disk) { final String path = disk.getDiskPath(); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java index 2df6c651e3d..4b2afa6a59b 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java @@ -33,24 +33,39 @@ private String dxml = ""; private String vmName = ""; private String destIp = ""; + private boolean migrateStorage; + private boolean autoConvergence; - public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml, final String vmName, final String destIp) { + public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml, + final boolean migrateStorage, final boolean autoConvergence, final String vmName, final String destIp) { this.libvirtComputingResource = libvirtComputingResource; this.dm = dm; this.dconn = dconn; this.dxml = dxml; + this.migrateStorage = migrateStorage; + this.autoConvergence = autoConvergence; this.vmName = vmName; this.destIp = destIp; } @Override public Domain call() throws LibvirtException { - // set compression flag for migration if libvirt version supports it - if (dconn.getLibVirVersion() < 1003000) { - return dm.migrate(dconn, 1 << 0, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed()); - } else { - return dm.migrate(dconn, 1 << 0|1 << 11, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed()); + long flags = 1 << 0; + + // set compression flag for migration, if libvirt version supports it + if (dconn.getLibVirVersion() >= 1000003) { + flags |= 1 << 11; + } + + if (migrateStorage) { + flags |= 1 << 6; } + + if (autoConvergence && dconn.getLibVirVersion() >= 1002003) { + flags |= 1 << 13; + } + + return dm.migrate(dconn, flags, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed()); } } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java index b2248b99062..0795abf0688 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java @@ -20,10 +20,12 @@ package com.cloud.hypervisor.kvm.resource.wrapper; import java.io.File; +import java.util.Map; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.CopyVolumeAnswer; import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; @@ -33,8 +35,13 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; + @ResourceWrapper(handles = CopyVolumeCommand.class) 
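For reference, the bit shifts used in MigrateKVMAsync.call() above map onto libvirt's virDomainMigrateFlags, and the version checks decode as major*1000000 + minor*1000 + micro (so 1000003 is libvirt 1.0.3 and 1002003 is 1.2.3):

    // 1 << 0  = VIR_MIGRATE_LIVE             (always set)
    // 1 << 11 = VIR_MIGRATE_COMPRESSED       (only when libvirt >= 1.0.3)
    // 1 << 6  = VIR_MIGRATE_NON_SHARED_DISK  (set when storage must move with the VM)
    // 1 << 13 = VIR_MIGRATE_AUTO_CONVERGE    (only when requested and libvirt >= 1.2.3)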
public final class LibvirtCopyVolumeCommandWrapper extends CommandWrapper { + private static final Logger LOGGER = Logger.getLogger(LibvirtCopyVolumeCommandWrapper.class); @Override public Answer execute(final CopyVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -46,20 +53,30 @@ public Answer execute(final CopyVolumeCommand command, final LibvirtComputingRes * ManagementServerImpl shows that it always sets copyToSecondary to * true */ + + Map srcDetails = command.getSrcDetails(); + + if (srcDetails != null) { + return handleCopyDataFromVolumeToSecondaryStorageUsingSrcDetails(command, libvirtComputingResource); + } + + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + final boolean copyToSecondary = command.toSecondaryStorage(); - String volumePath = command.getVolumePath(); final StorageFilerTO pool = command.getPool(); final String secondaryStorageUrl = command.getSecondaryStorageURL(); + KVMStoragePool secondaryStoragePool = null; - KVMStoragePool primaryPool = null; + String volumePath; + KVMStoragePool primaryPool; - final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); try { try { primaryPool = storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid()); } catch (final CloudRuntimeException e) { if (e.getMessage().contains("not found")) { - primaryPool = storagePoolMgr.createStoragePool(pool.getUuid(), pool.getHost(), pool.getPort(), pool.getPath(), pool.getUserInfo(), pool.getType()); + primaryPool = storagePoolMgr.createStoragePool(pool.getUuid(), pool.getHost(), pool.getPort(), pool.getPath(), + pool.getUserInfo(), pool.getType()); } else { return new CopyVolumeAnswer(command, false, e.getMessage(), null, null); } @@ -85,6 +102,7 @@ public Answer execute(final CopyVolumeCommand command, final LibvirtComputingRes secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + volumePath); final KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(command.getVolumePath() + ".qcow2"); + storagePoolMgr.copyPhysicalDisk(volume, volumeName, primaryPool, 0); return new CopyVolumeAnswer(command, true, null, null, volumeName); @@ -97,4 +115,61 @@ public Answer execute(final CopyVolumeCommand command, final LibvirtComputingRes } } } -} \ No newline at end of file + + private Answer handleCopyDataFromVolumeToSecondaryStorageUsingSrcDetails(CopyVolumeCommand command, LibvirtComputingResource libvirtComputingResource) { + KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + PrimaryDataStoreTO srcPrimaryDataStore = null; + KVMStoragePool secondaryStoragePool = null; + + Map srcDetails = command.getSrcDetails(); + + String srcPath = srcDetails.get(DiskTO.IQN); + + if (srcPath == null) { + return new CopyVolumeAnswer(command, false, "No IQN was specified", null, null); + } + + try { + LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper(); + String destVolumeName = libvirtUtilitiesHelper.generateUUIDName() + ".qcow2"; + String destVolumePath = command.getVolumePath() + File.separator; + + String secondaryStorageUrl = command.getSecondaryStorageURL(); + + secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl); + + secondaryStoragePool.createFolder(File.separator + destVolumePath); + + storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid()); + + secondaryStoragePool = 
storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + File.separator + destVolumePath); + + VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData(); + + srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore(); + + storagePoolMgr.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails); + + KVMPhysicalDisk srcPhysicalDisk = storagePoolMgr.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); + + storagePoolMgr.copyPhysicalDisk(srcPhysicalDisk, destVolumeName, secondaryStoragePool, command.getWait() * 1000); + + return new CopyVolumeAnswer(command, true, null, null, destVolumePath + destVolumeName); + } catch (final CloudRuntimeException e) { + return new CopyVolumeAnswer(command, false, e.toString(), null, null); + } finally { + try { + if (srcPrimaryDataStore != null) { + storagePoolMgr.disconnectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); + } + } + catch (Exception e) { + LOGGER.warn("Unable to disconnect from the source device.", e); + } + + if (secondaryStoragePool != null) { + storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid()); + } + } + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteStoragePoolCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteStoragePoolCommandWrapper.java index 12ba874cf61..08e414a106b 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteStoragePoolCommandWrapper.java @@ -30,13 +30,17 @@ @ResourceWrapper(handles = DeleteStoragePoolCommand.class) public final class LibvirtDeleteStoragePoolCommandWrapper extends CommandWrapper { - @Override public Answer execute(final DeleteStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) { try { - final StorageFilerTO pool = command.getPool(); - final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); - storagePoolMgr.deleteStoragePool(pool.getType(), pool.getUuid()); + // if getRemoveDatastore() is true, then we are dealing with managed storage and can skip the delete logic here + if (!command.getRemoveDatastore()) { + final StorageFilerTO pool = command.getPool(); + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + + storagePoolMgr.deleteStoragePool(pool.getType(), pool.getUuid()); + } + return new Answer(command); } catch (final CloudRuntimeException e) { return new Answer(command, false, e.toString()); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index 9e7b78e5047..6ed56fb75ad 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -19,7 +19,12 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.Set; import 
java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -28,12 +33,32 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +import org.apache.commons.collections.MapUtils; +import org.apache.commons.io.IOUtils; import org.apache.log4j.Logger; + import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; import org.libvirt.LibvirtException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.NamedNodeMap; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.xml.sax.SAXException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateAnswer; import com.cloud.agent.api.MigrateCommand; @@ -45,6 +70,7 @@ import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.utils.Ternary; +import com.cloud.utils.exception.CloudRuntimeException; @ResourceWrapper(handles = MigrateCommand.class) public final class LibvirtMigrateCommandWrapper extends CommandWrapper { @@ -61,7 +87,7 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour String result = null; List ifaces = null; - List disks = null; + List disks; Domain dm = null; Connect dconn = null; @@ -69,6 +95,7 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour Connect conn = null; String xmlDesc = null; List> vmsnapshots = null; + try { final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper(); @@ -79,7 +106,7 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour /* We replace the private IP address with the address of the destination host. This is because the VNC listens on the private IP address of the hypervisor, - but that address is ofcourse different on the target host. + but that address is of course different on the target host. MigrateCommand.getDestinationIp() returns the private IP address of the target hypervisor. So it's safe to use. @@ -104,12 +131,19 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. 
// delete the metadata of vm snapshots before migration vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm); + Map mapMigrateStorage = command.getMigrateStorage(); + + if (MapUtils.isNotEmpty(mapMigrateStorage)) { + xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage); + } + dconn = libvirtUtilitiesHelper.retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system"); //run migration in thread so we can monitor it s_logger.info("Live migration of instance " + vmName + " initiated"); final ExecutorService executor = Executors.newFixedThreadPool(1); - final Callable worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, vmName, command.getDestinationIp()); + final Callable worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, MapUtils.isNotEmpty(mapMigrateStorage), + command.isAutoConvergence(), vmName, command.getDestinationIp()); final Future migrateThread = executor.submit(worker); executor.shutdown(); long sleeptime = 0; @@ -167,6 +201,21 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. } catch (final TimeoutException e) { s_logger.debug("Timed out while migrating domain: " + e.getMessage()); result = e.getMessage(); + } catch (final IOException e) { + s_logger.debug("IOException: " + e.getMessage()); + result = e.getMessage(); + } catch (final ParserConfigurationException e) { + s_logger.debug("ParserConfigurationException: " + e.getMessage()); + result = e.getMessage(); + } catch (final SAXException e) { + s_logger.debug("SAXException: " + e.getMessage()); + result = e.getMessage(); + } catch (final TransformerConfigurationException e) { + s_logger.debug("TransformerConfigurationException: " + e.getMessage()); + result = e.getMessage(); + } catch (final TransformerException e) { + s_logger.debug("TransformerException: " + e.getMessage()); + result = e.getMessage(); } finally { try { if (dm != null && result != null) { @@ -230,4 +279,138 @@ String replaceIpForVNCInDescFile(String xmlDesc, final String target) { } return xmlDesc; } + + // Pass in a list of the disks to update in the XML (xmlDesc). Each disk passed in needs to have a serial number. If any disk's serial number in the + // list does not match a disk in the XML, an exception should be thrown. + // In addition to the serial number, each disk in the list needs the following info: + // * The value of the 'type' of the disk (ex. file, block) + // * The value of the 'type' of the driver of the disk (ex. qcow2, raw) + // * The source of the disk needs an attribute that is either 'file' or 'dev' as well as its corresponding value. 
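As an illustration of the rewrite this comment describes, a disk moving from file-backed storage to a managed iSCSI LUN might be transformed roughly as follows (paths and names are hypothetical, assuming a MigrateDiskInfo of DiskType.BLOCK, DriverType.RAW and Source.DEV):

    Before:
      <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='/mnt/hypothetical-pool/hypothetical-volume.qcow2'/>
        <target dev='vda' bus='virtio'/>
      </disk>

    After (the 'type' attributes are rewritten, the <source> element is rebuilt,
    and any <auth> or <iotune> children are dropped):
      <disk type='block' device='disk'>
        <driver name='qemu' type='raw' cache='none'/>
        <source dev='/dev/mapper/hypothetical-lun'/>
        <target dev='vda' bus='virtio'/>
      </disk>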
+ private String replaceStorage(String xmlDesc, Map migrateStorage) + throws IOException, ParserConfigurationException, SAXException, TransformerException { + InputStream in = IOUtils.toInputStream(xmlDesc); + + DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document doc = docBuilder.parse(in); + + // Get the root element + Node domainNode = doc.getFirstChild(); + + NodeList domainChildNodes = domainNode.getChildNodes(); + + for (int i = 0; i < domainChildNodes.getLength(); i++) { + Node domainChildNode = domainChildNodes.item(i); + + if ("devices".equals(domainChildNode.getNodeName())) { + NodeList devicesChildNodes = domainChildNode.getChildNodes(); + + for (int x = 0; x < devicesChildNodes.getLength(); x++) { + Node deviceChildNode = devicesChildNodes.item(x); + + if ("disk".equals(deviceChildNode.getNodeName())) { + Node diskNode = deviceChildNode; + + String sourceFileDevText = getSourceFileDevText(diskNode); + + String path = getPathFromSourceFileDevText(migrateStorage.keySet(), sourceFileDevText); + + if (path != null) { + MigrateCommand.MigrateDiskInfo migrateDiskInfo = migrateStorage.remove(path); + + NamedNodeMap diskNodeAttributes = diskNode.getAttributes(); + Node diskNodeAttribute = diskNodeAttributes.getNamedItem("type"); + + diskNodeAttribute.setTextContent(migrateDiskInfo.getDiskType().toString()); + + NodeList diskChildNodes = diskNode.getChildNodes(); + + for (int z = 0; z < diskChildNodes.getLength(); z++) { + Node diskChildNode = diskChildNodes.item(z); + + if ("driver".equals(diskChildNode.getNodeName())) { + Node driverNode = diskChildNode; + + NamedNodeMap driverNodeAttributes = driverNode.getAttributes(); + Node driverNodeAttribute = driverNodeAttributes.getNamedItem("type"); + + driverNodeAttribute.setTextContent(migrateDiskInfo.getDriverType().toString()); + } else if ("source".equals(diskChildNode.getNodeName())) { + diskNode.removeChild(diskChildNode); + + Element newChildSourceNode = doc.createElement("source"); + + newChildSourceNode.setAttribute(migrateDiskInfo.getSource().toString(), migrateDiskInfo.getSourceText()); + + diskNode.appendChild(newChildSourceNode); + } else if ("auth".equals(diskChildNode.getNodeName())) { + diskNode.removeChild(diskChildNode); + } else if ("iotune".equals(diskChildNode.getNodeName())) { + diskNode.removeChild(diskChildNode); + } + } + } + } + } + } + } + + if (!migrateStorage.isEmpty()) { + throw new CloudRuntimeException("Disk info was passed into LibvirtMigrateCommandWrapper.replaceStorage that was not used."); + } + + return getXml(doc); + } + + private String getPathFromSourceFileDevText(Set paths, String sourceFileDevText) { + if (paths != null && sourceFileDevText != null) { + for (String path : paths) { + if (sourceFileDevText.contains(path)) { + return path; + } + } + } + + return null; + } + + private String getSourceFileDevText(Node diskNode) { + NodeList diskChildNodes = diskNode.getChildNodes(); + + for (int i = 0; i < diskChildNodes.getLength(); i++) { + Node diskChildNode = diskChildNodes.item(i); + + if ("source".equals(diskChildNode.getNodeName())) { + NamedNodeMap diskNodeAttributes = diskChildNode.getAttributes(); + + Node diskNodeAttribute = diskNodeAttributes.getNamedItem("file"); + + if (diskNodeAttribute != null) { + return diskNodeAttribute.getTextContent(); + } + + diskNodeAttribute = diskNodeAttributes.getNamedItem("dev"); + + if (diskNodeAttribute != null) { + return diskNodeAttribute.getTextContent(); + } + } 
+ } + + return null; + } + + private String getXml(Document doc) throws TransformerException { + TransformerFactory transformerFactory = TransformerFactory.newInstance(); + Transformer transformer = transformerFactory.newTransformer(); + + DOMSource source = new DOMSource(doc); + + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + StreamResult result = new StreamResult(byteArrayOutputStream); + + transformer.transform(source, result); + + return byteArrayOutputStream.toString(); + } } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java new file mode 100644 index 00000000000..311eb670e99 --- /dev/null +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java @@ -0,0 +1,95 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.storage.MigrateVolumeAnswer; +import com.cloud.agent.api.storage.MigrateVolumeCommand; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; + +import java.util.Map; +import java.util.UUID; + +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; + +@ResourceWrapper(handles = MigrateVolumeCommand.class) +public final class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper { + private static final Logger LOGGER = Logger.getLogger(LibvirtMigrateVolumeCommandWrapper.class); + + @Override + public Answer execute(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { + KVMStoragePoolManager storagePoolManager = libvirtComputingResource.getStoragePoolMgr(); + + VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData(); + PrimaryDataStoreTO srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore(); + + Map srcDetails = command.getSrcDetails(); + + String srcPath = srcDetails != null ? 
srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath(); + + VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData(); + PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore(); + + Map destDetails = command.getDestDetails(); + + String destPath = destDetails != null && destDetails.get(DiskTO.IQN) != null ? destDetails.get(DiskTO.IQN) : + (destVolumeObjectTO.getPath() != null ? destVolumeObjectTO.getPath() : UUID.randomUUID().toString()); + + try { + storagePoolManager.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails); + + KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); + + KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid()); + + storagePoolManager.connectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath, destDetails); + + storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds()); + } + catch (Exception ex) { + return new MigrateVolumeAnswer(command, false, ex.getMessage(), null); + } + finally { + try { + storagePoolManager.disconnectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath); + } + catch (Exception e) { + LOGGER.warn("Unable to disconnect from the destination device.", e); + } + + try { + storagePoolManager.disconnectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); + } + catch (Exception e) { + LOGGER.warn("Unable to disconnect from the source device.", e); + } + } + + return new MigrateVolumeAnswer(command, true, null, destPath); + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java new file mode 100644 index 00000000000..627d4b7beb6 --- /dev/null +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java @@ -0,0 +1,80 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
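The next wrapper handles ModifyTargetsCommand, whose targets arrive as plain key/value maps. One entry might look like the following sketch (the constant names come from ModifyTargetsCommand as used below; the UUID and IQN values are hypothetical):

    Map<String, String> target = new HashMap<>();
    target.put(ModifyTargetsCommand.STORAGE_TYPE, StoragePoolType.Iscsi.name());
    target.put(ModifyTargetsCommand.STORAGE_UUID, "4c9f9a06-hypothetical-uuid");
    target.put(ModifyTargetsCommand.IQN, "/iqn.2010-01.com.example:volume-7/0");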
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyTargetsAnswer; +import com.cloud.agent.api.ModifyTargetsCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.exception.CloudRuntimeException; + +@ResourceWrapper(handles = ModifyTargetsCommand.class) +public final class LibvirtModifyTargetsCommandWrapper extends CommandWrapper<ModifyTargetsCommand, Answer, LibvirtComputingResource> { + private static final Logger s_logger = Logger.getLogger(LibvirtModifyTargetsCommandWrapper.class); + + @Override + public Answer execute(final ModifyTargetsCommand command, final LibvirtComputingResource libvirtComputingResource) { + KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + + List<Map<String, String>> targets = command.getTargets(); + + // When attempting to connect to one or more targets, place the successfully connected path into this List. + List<String> connectedPaths = new ArrayList<>(targets.size()); + + for (Map<String, String> target : targets) { + StoragePoolType storagePoolType = StoragePoolType.valueOf(target.get(ModifyTargetsCommand.STORAGE_TYPE)); + String storageUuid = target.get(ModifyTargetsCommand.STORAGE_UUID); + String path = target.get(ModifyTargetsCommand.IQN); + + if (command.getAdd()) { + if (storagePoolMgr.connectPhysicalDisk(storagePoolType, storageUuid, path, target)) { + KVMPhysicalDisk kvmPhysicalDisk = storagePoolMgr.getPhysicalDisk(storagePoolType, storageUuid, path); + + connectedPaths.add(kvmPhysicalDisk.getPath()); + } + else { + throw new CloudRuntimeException("Unable to connect to the following target: " + path); + } + } + else { + if (!storagePoolMgr.disconnectPhysicalDisk(storagePoolType, storageUuid, path)) { + throw new CloudRuntimeException("Unable to disconnect from the following target: " + path); + } + } + } + + ModifyTargetsAnswer modifyTargetsAnswer = new ModifyTargetsAnswer(); + + modifyTargetsAnswer.setConnectedPaths(connectedPaths); + + return modifyTargetsAnswer; + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index 940a0a727ef..ac9f884042a 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -45,6 +45,11 @@ @Override public Answer execute(final PrepareForMigrationCommand command, final LibvirtComputingResource libvirtComputingResource) { final VirtualMachineTO vm = command.getVirtualMachine(); + + if (command.isRollback()) { + return handleRollback(command, libvirtComputingResource); + } + if (s_logger.isDebugEnabled()) { s_logger.debug("Preparing host for migrating " + vm); } @@ -89,4 +94,15 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom } } } -} \ No newline at end of file + + private Answer handleRollback(PrepareForMigrationCommand command, LibvirtComputingResource
libvirtComputingResource) { + KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + VirtualMachineTO vmTO = command.getVirtualMachine(); + + if (!storagePoolMgr.disconnectPhysicalDisksViaVmSpec(vmTO)) { + return new PrepareForMigrationAnswer(command, "failed to disconnect physical disks from host"); + } + + return new PrepareForMigrationAnswer(command); + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java index 7e4ee22445a..bb837b56a24 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java @@ -19,8 +19,9 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import java.util.List; import java.io.File; +import java.util.List; +import java.util.Map; import com.cloud.utils.Pair; import com.cloud.utils.ssh.SshHelper; @@ -88,9 +89,23 @@ public Answer execute(final StopCommand command, final LibvirtComputingResource final String result = libvirtComputingResource.stopVM(conn, vmName, command.isForceStop()); if (result == null) { - for (final DiskDef disk : disks) { - libvirtComputingResource.cleanupDisk(disk); + if (disks != null && disks.size() > 0) { + for (final DiskDef disk : disks) { + libvirtComputingResource.cleanupDisk(disk); + } } + else { + // When using iSCSI-based managed storage, if the user shuts a VM down from the guest OS (as opposed to doing so from CloudStack), + // info needs to be passed to the KVM agent to have it disconnect KVM from the applicable iSCSI volumes. + List<Map<String, String>> volumesToDisconnect = command.getVolumesToDisconnect(); + + if (volumesToDisconnect != null) { + for (Map<String, String> volumeToDisconnect : volumesToDisconnect) { + libvirtComputingResource.cleanupDisk(volumeToDisconnect); + } + } + } + for (final InterfaceDef iface : ifaces) { // We don't know which "traffic type" is associated with // each interface at this point, so inform all vif drivers diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index 46a48c95bb7..a90c97fef8e 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -20,12 +20,15 @@ import java.util.List; import java.util.Map; -import com.cloud.storage.Storage; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.log4j.Logger; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import com.cloud.agent.api.to.DiskTO; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.StringUtils; @@ -37,7 +40,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { private static final Logger s_logger = Logger.getLogger(IscsiAdmStorageAdaptor.class); - private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<String, KVMStoragePool>(); + private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>(); @Override public KVMStoragePool createStoragePool(String uuid, String host, int port,
String path, String userInfo, StoragePoolType storagePoolType) { @@ -115,7 +118,7 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map 0 && numberOfTries > 0) { + numberOfTries--; + + try { + Thread.sleep(timeBetweenTries); + } catch (Exception ex) { + // don't do anything + } + } + } + private void executeChapCommand(String path, KVMStoragePool pool, String nParameter, String vParameter, String detail) throws Exception { Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); @@ -193,13 +213,13 @@ private void executeChapCommand(String path, KVMStoragePool pool, String nParame } // example by-path: /dev/disk/by-path/ip-192.168.233.10:3260-iscsi-iqn.2012-03.com.solidfire:storagepool2-lun-0 - private String getByPath(String host, String path) { - return "/dev/disk/by-path/ip-" + host + "-iscsi-" + getIqn(path) + "-lun-" + getLun(path); + private static String getByPath(String host, int port, String path) { + return "/dev/disk/by-path/ip-" + host + ":" + port + "-iscsi-" + getIqn(path) + "-lun-" + getLun(path); } @Override public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { - String deviceByPath = getByPath(pool.getSourceHost() + ":" + pool.getSourcePort(), volumeUuid); + String deviceByPath = getByPath(pool.getSourceHost(), pool.getSourcePort(), volumeUuid); KVMPhysicalDisk physicalDisk = new KVMPhysicalDisk(deviceByPath, volumeUuid, pool); physicalDisk.setFormat(PhysicalDiskFormat.RAW); @@ -226,6 +246,9 @@ private long getDeviceSize(String deviceByPath) { return 0; } + else { + s_logger.info("Successfully retrieved the size of device " + deviceByPath); + } return Long.parseLong(parser.getLine()); } @@ -252,10 +275,10 @@ private static String getComponent(String path, int index) { return tmp[index].trim(); } - public boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) { + private boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) { // use iscsiadm to log out of the iSCSI target and un-discover it - // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10 --logout + // ex. 
sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --logout Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); iScsiAdmCmd.add("-m", "node"); @@ -295,6 +318,8 @@ public boolean disconnectPhysicalDisk(String host, int port, String iqn, String System.out.println("Removed iSCSI target /" + iqn + "/" + lun); } + waitForDiskToBecomeUnavailable(host, port, iqn, lun); + return true; } @@ -303,6 +328,19 @@ public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) { return disconnectPhysicalDisk(pool.getSourceHost(), pool.getSourcePort(), getIqn(volumeUuid), getLun(volumeUuid)); } + @Override + public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) { + String host = volumeToDisconnect.get(DiskTO.STORAGE_HOST); + String port = volumeToDisconnect.get(DiskTO.STORAGE_PORT); + String path = volumeToDisconnect.get(DiskTO.IQN); + + if (host != null && port != null && path != null) { + return disconnectPhysicalDisk(host, Integer.parseInt(port), getIqn(path), getLun(path)); + } + + return false; + } + @Override public boolean disconnectPhysicalDiskByPath(String localPath) { String search1 = "/dev/disk/by-path/ip-"; @@ -310,7 +348,7 @@ public boolean disconnectPhysicalDiskByPath(String localPath) { String search3 = "-iscsi-"; String search4 = "-lun-"; - if (localPath.indexOf(search3) == -1) { + if (!localPath.contains(search3)) { // this volume doesn't belong to this adaptor, so just return true return true; } @@ -356,8 +394,37 @@ public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, } @Override - public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { - throw new UnsupportedOperationException("Copying a disk is not supported in this configuration."); + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk srcDisk, String destVolumeUuid, KVMStoragePool destPool, int timeout) { + QemuImg q = new QemuImg(timeout); + + QemuImgFile srcFile; + + KVMStoragePool srcPool = srcDisk.getPool(); + + if (srcPool.getType() == StoragePoolType.RBD) { + srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(), srcPool.getSourcePort(), + srcPool.getAuthUserName(), srcPool.getAuthSecret(), + srcDisk.getPath()), srcDisk.getFormat()); + } else { + srcFile = new QemuImgFile(srcDisk.getPath(), srcDisk.getFormat()); + } + + KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(destVolumeUuid); + + QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + try { + q.convert(srcFile, destFile); + } catch (QemuImgException ex) { + String msg = "Failed to copy data from " + srcDisk.getPath() + " to " + + destDisk.getPath() + ". 
The error was the following: " + ex.getMessage(); + + s_logger.error(msg); + + throw new CloudRuntimeException(msg); + } + + return destPool.getPhysicalDisk(destVolumeUuid); } @Override diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index 28e5f03d512..c6135080671 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -158,6 +158,18 @@ public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { return result; } + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + for (Map.Entry set : _storageMapper.entrySet()) { + StorageAdaptor adaptor = set.getValue(); + + if (adaptor.disconnectPhysicalDisk(volumeToDisconnect)) { + return true; + } + } + + return false; + } + public boolean disconnectPhysicalDiskByPath(String path) { for (Map.Entry set : _storageMapper.entrySet()) { StorageAdaptor adaptor = set.getValue(); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 72e0ca2ab2a..f09e8f7b0ea 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -237,8 +237,17 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); } else if (destData instanceof TemplateObjectTO) { - final TemplateObjectTO destTempl = (TemplateObjectTO)destData; - primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); + TemplateObjectTO destTempl = (TemplateObjectTO)destData; + + Map details = primaryStore.getDetails(); + + String path = details != null ? details.get("managedStoreTarget") : null; + + storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); + + primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); + + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); } else { primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds()); } @@ -422,24 +431,41 @@ public Answer copyVolumeFromImageCacheToPrimary(final CopyCommand cmd) { } } + Map details = cmd.getOptions2(); + + String path = details != null ? 
details.get(DiskTO.IQN) : null; + + storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); + final String volumeName = UUID.randomUUID().toString(); final int index = srcVolumePath.lastIndexOf(File.separator); final String volumeDir = srcVolumePath.substring(0, index); String srcVolumeName = srcVolumePath.substring(index + 1); + secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + File.separator + volumeDir); + if (!srcVolumeName.endsWith(".qcow2") && srcFormat == ImageFormat.QCOW2) { srcVolumeName = srcVolumeName + ".qcow2"; } + final KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(srcVolumeName); + volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString())); - final KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, volumeName, primaryPool, cmd.getWaitInMillSeconds()); + + final KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, path != null ? path : volumeName, primaryPool, cmd.getWaitInMillSeconds()); + + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); + final VolumeObjectTO newVol = new VolumeObjectTO(); + newVol.setFormat(ImageFormat.valueOf(newDisk.getFormat().toString().toUpperCase())); - newVol.setPath(volumeName); + newVol.setPath(path != null ? path : volumeName); + return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to ccopyVolumeFromImageCacheToPrimary: ", e); + s_logger.debug("Failed to copyVolumeFromImageCacheToPrimary: ", e); + return new CopyCmdAnswer(e.toString()); } finally { if (secondaryStoragePool != null) { @@ -496,6 +522,13 @@ public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) { @Override public Answer createTemplateFromVolume(final CopyCommand cmd) { + Map details = cmd.getOptions(); + + if (details != null && details.get(DiskTO.IQN) != null) { + // use the managed-storage approach + return createTemplateFromVolumeOrSnapshot(cmd); + } + final DataTO srcData = cmd.getSrcTO(); final DataTO destData = cmd.getDestTO(); final int wait = cmd.getWaitInMillSeconds(); @@ -510,7 +543,8 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { final NfsTO nfsImageStore = (NfsTO)imageStore; KVMStoragePool secondaryStorage = null; - KVMStoragePool primary = null; + KVMStoragePool primary; + try { final String templateFolder = template.getPath(); @@ -614,8 +648,139 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { } @Override - public Answer createTemplateFromSnapshot(final CopyCommand cmd) { - return null; //To change body of implemented methods use File | Settings | File Templates. 
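[Reviewer note: the KVMStorageProcessor hunks above and below all drive managed storage through the same three-step sequence: connect the iSCSI LUN on the host, copy through qemu-img, then disconnect the LUN (the migrate-volume wrapper earlier in this diff does the disconnect in a finally block). A minimal sketch of that sequence against the agent-side types this PR touches; the class, method, and parameter names here are illustrative, not part of the diff:

import java.util.Map;

import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.storage.Storage.StoragePoolType;

final class ManagedLunCopySketch {
    private ManagedLunCopySketch() {}

    // Copies srcDisk onto the managed LUN identified by iqnPath.
    static KVMPhysicalDisk copyThroughManagedLun(KVMStoragePoolManager mgr, KVMPhysicalDisk srcDisk,
            StoragePoolType poolType, String poolUuid, String iqnPath,
            Map<String, String> details, int waitMillis) {
        // Log the host in to the iSCSI target so the LUN appears as a block device.
        mgr.connectPhysicalDisk(poolType, poolUuid, iqnPath, details);

        try {
            KVMStoragePool pool = mgr.getStoragePool(poolType, poolUuid);

            // qemu-img convert from the source image onto the raw LUN.
            return mgr.copyPhysicalDisk(srcDisk, iqnPath, pool, waitMillis);
        }
        finally {
            // Always log back out; the copied data lives on the SAN volume, not the host.
            mgr.disconnectPhysicalDisk(poolType, poolUuid, iqnPath);
        }
    }
}

Note how the IQN path doubles as the destination volume name, which is why the hunks above pass 'path' (when present) where an unmanaged pool would get a fresh UUID.]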
+ public Answer createTemplateFromSnapshot(CopyCommand cmd) { + Map<String, String> details = cmd.getOptions(); + + if (details != null && details.get(DiskTO.IQN) != null) { + // use the managed-storage approach + return createTemplateFromVolumeOrSnapshot(cmd); + } + + return new CopyCmdAnswer("operation not supported"); + } + + private Answer createTemplateFromVolumeOrSnapshot(CopyCommand cmd) { + DataTO srcData = cmd.getSrcTO(); + + final boolean isVolume; + + if (srcData instanceof VolumeObjectTO) { + isVolume = true; + } + else if (srcData instanceof SnapshotObjectTO) { + isVolume = false; + } + else { + return new CopyCmdAnswer("unsupported object type"); + } + + PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcData.getDataStore(); + + DataTO destData = cmd.getDestTO(); + TemplateObjectTO template = (TemplateObjectTO)destData; + DataStoreTO imageStore = template.getDataStore(); + + if (!(imageStore instanceof NfsTO)) { + return new CopyCmdAnswer("unsupported protocol"); + } + + NfsTO nfsImageStore = (NfsTO)imageStore; + + KVMStoragePool secondaryStorage = null; + + try { + Map<String, String> details = cmd.getOptions(); + + String path = details != null ? details.get(DiskTO.IQN) : null; + + if (path == null) { + throw new CloudRuntimeException("The 'path' field must be specified."); + } + + storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); + + KVMPhysicalDisk srcDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); + + secondaryStorage = storagePoolMgr.getStoragePoolByURI(nfsImageStore.getUrl()); + + String templateFolder = template.getPath(); + String tmpltPath = secondaryStorage.getLocalPath() + File.separator + templateFolder; + + storageLayer.mkdirs(tmpltPath); + + String templateName = UUID.randomUUID().toString(); + + s_logger.debug("Converting " + srcDisk.getFormat().toString() + " disk " + srcDisk.getPath() + " into template " + templateName); + + String destName = templateFolder + "/" + templateName + ".qcow2"; + + storagePoolMgr.copyPhysicalDisk(srcDisk, destName, secondaryStorage, cmd.getWaitInMillSeconds()); + + File templateProp = new File(tmpltPath + "/template.properties"); + + if (!templateProp.exists()) { + templateProp.createNewFile(); + } + + String templateContent = "filename=" + templateName + ".qcow2" + System.getProperty("line.separator"); + + DateFormat dateFormat = new SimpleDateFormat("MM_dd_yyyy"); + Date date = new Date(); + + if (isVolume) { + templateContent += "volume.name=" + dateFormat.format(date) + System.getProperty("line.separator"); + } + else { + templateContent += "snapshot.name=" + dateFormat.format(date) + System.getProperty("line.separator"); + } + + FileOutputStream templFo = new FileOutputStream(templateProp); + + templFo.write(templateContent.getBytes()); + templFo.flush(); + templFo.close(); + + Map<String, Object> params = new HashMap<>(); + + params.put(StorageLayer.InstanceConfigKey, storageLayer); + + Processor qcow2Processor = new QCOW2Processor(); + + qcow2Processor.configure("QCOW2 Processor", params); + + FormatInfo info = qcow2Processor.process(tmpltPath, null, templateName); + + TemplateLocation loc = new TemplateLocation(storageLayer, tmpltPath); + + loc.create(1, true, templateName); + loc.addFormat(info); + loc.save(); + + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); + + TemplateObjectTO newTemplate = new TemplateObjectTO(); + + newTemplate.setPath(templateFolder + File.separator + templateName + ".qcow2"); + 
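// The size fields below come from the FormatInfo produced by the QCOW2Processor above. +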
newTemplate.setSize(info.virtualSize); + newTemplate.setPhysicalSize(info.size); + newTemplate.setFormat(ImageFormat.QCOW2); + newTemplate.setName(templateName); + + return new CopyCmdAnswer(newTemplate); + } catch (Exception ex) { + if (isVolume) { + s_logger.debug("Failed to create template from volume: ", ex); + } + else { + s_logger.debug("Failed to create template from snapshot: ", ex); + } + + return new CopyCmdAnswer(ex.toString()); + } finally { + if (secondaryStorage != null) { + secondaryStorage.delete(); + } + } } protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException { @@ -1327,7 +1492,17 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { final String primaryUuid = pool.getUuid(); final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getPoolType(), primaryUuid); final String volUuid = UUID.randomUUID().toString(); - final KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, volUuid, primaryPool, cmd.getWaitInMillSeconds()); + + Map details = cmd.getOptions2(); + + String path = details != null ? details.get(DiskTO.IQN) : null; + + storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details); + + KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, path != null ? path : volUuid, primaryPool, cmd.getWaitInMillSeconds()); + + storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path); + final VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(disk.getName()); newVol.setSize(disk.getVirtualSize()); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index 6a0430f86ea..792fc6958cd 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -760,6 +760,12 @@ public boolean disconnectPhysicalDisk(String uuid, KVMStoragePool pool) { return true; } + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + // this is for managed storage that needs to cleanup disks after use + return false; + } + @Override public boolean disconnectPhysicalDiskByPath(String localPath) { // we've only ever cleaned up ISOs that are NFS mounted diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java index 72edb134608..596582db34d 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java @@ -260,6 +260,11 @@ public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) { } } + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + return false; + } + @Override public boolean disconnectPhysicalDiskByPath(String localPath) { return false; diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java index 43f94fbc57f..2c1ed233b40 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java 
@@ -48,6 +48,8 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, // given disk path (per database) and pool, clean up disk on host public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool); + public boolean disconnectPhysicalDisk(Map volumeToDisconnect); + // given local path to file/device (per Libvirt XML), 1) check that device is // handled by your adaptor, return false if not. 2) clean up device, return true public boolean disconnectPhysicalDiskByPath(String localPath); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 6248b7c6b3a..d2e8b91d7f0 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -253,6 +253,7 @@ import com.cloud.hypervisor.vmware.mo.DiskControllerType; import com.cloud.hypervisor.vmware.mo.FeatureKeyConstants; import com.cloud.hypervisor.vmware.mo.HostMO; +import com.cloud.hypervisor.vmware.mo.HostDatastoreSystemMO; import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO; import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.NetworkDetails; @@ -308,6 +309,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer { private static final Logger s_logger = Logger.getLogger(VmwareResource.class); + private static final Random RANDOM = new Random(System.nanoTime()); + protected String _name; protected final long _opsTimeout = 900000; // 15 minutes time out to time @@ -680,10 +683,8 @@ private Answer execute(ResizeVolumeCommand cmd) { boolean useWorkerVm = false; VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); - String poolId = cmd.getPoolUuid(); VirtualMachineMO vmMo = null; - DatastoreMO dsMo = null; - ManagedObjectReference morDS = null; + String vmdkDataStorePath = null; try { @@ -693,43 +694,80 @@ private Answer execute(ResizeVolumeCommand cmd) { } else if (newSize == oldSize) { return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB); } + if (vmName.equalsIgnoreCase("none")) { - // we need to spawn a worker VM to attach the volume to and - // resize the volume. + // we need to spawn a worker VM to attach the volume to and resize the volume. 
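// The worker VM is a throwaway VM that only exists so the detached volume can be attached and resized; it is destroyed in the finally block once the resize completes.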
useWorkerVm = true; vmName = getWorkerName(getServiceContext(), cmd, 0); - morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId); - dsMo = new DatastoreMO(hyperHost.getContext(), morDS); + String poolId = cmd.getPoolUuid(); + + ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId); + DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS); + s_logger.info("Create worker VM " + vmName); + vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName); + if (vmMo == null) { throw new Exception("Unable to create a worker VM for volume resize"); } synchronized (this) { vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk"); - vmMo.attachDisk(new String[] {vmdkDataStorePath}, morDS); + + vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS); } } + // find VM through datacenter (VM is not at the target host yet) vmMo = hyperHost.findVmOnPeerHyperHost(vmName); + if (vmMo == null) { String msg = "VM " + vmName + " does not exist in VMware datacenter"; + s_logger.error(msg); + throw new Exception(msg); } Pair vdisk = vmMo.getDiskDevice(path); + if (vdisk == null) { - if (s_logger.isTraceEnabled()) + if (s_logger.isTraceEnabled()) { s_logger.trace("resize volume done (failed)"); + } + throw new Exception("No such disk device: " + path); } + // IDE virtual disk cannot be re-sized if VM is running if (vdisk.second() != null && vdisk.second().contains("ide")) { - throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. " - + "Please re-try when virtual disk is attached to a VM using SCSI controller."); + throw new Exception("Re-sizing a virtual disk over an IDE controller is not supported in the VMware hypervisor. 
" + + "Please re-try when virtual disk is attached to a VM using a SCSI controller."); + } + + if (cmd.isManaged()) { + VmwareContext context = getServiceContext(); + + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + ClusterMO clusterMO = new ClusterMO(context, morCluster); + + List> lstHosts = clusterMO.getClusterHosts(); + + Collections.shuffle(lstHosts, RANDOM); + + Pair host = lstHosts.get(0); + + HostMO hostMO = new HostMO(context, host.first()); + HostDatastoreSystemMO hostDatastoreSystem = hostMO.getHostDatastoreSystemMO(); + + String iScsiName = cmd.get_iScsiName(); + + ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, VmwareResource.getDatastoreName(iScsiName)); + DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS); + + _storageProcessor.expandDatastore(hostDatastoreSystem, dsMo); } if (vdisk.second() != null && !vdisk.second().toLowerCase().startsWith("scsi")) @@ -744,17 +782,22 @@ private Answer execute(ResizeVolumeCommand cmd) { throw new Exception("Resize is not supported because Disk device has Parent "+ ((VirtualDiskFlatVer2BackingInfo)disk.getBacking()).getParent().getUuid()); } String vmdkAbsFile = getAbsoluteVmdkFile(disk); + if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) { vmMo.updateAdapterTypeIfRequired(vmdkAbsFile); } disk.setCapacityInKB(newSize); - VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(disk); deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + + VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); + if (!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure VM to resize disk. vmName: " + vmName); } @@ -762,12 +805,15 @@ private Answer execute(ResizeVolumeCommand cmd) { return new ResizeVolumeAnswer(cmd, true, "success", newSize * 1024); } catch (Exception e) { s_logger.error("Unable to resize volume", e); + String error = "Failed to resize volume: " + e.getMessage(); + return new ResizeVolumeAnswer(cmd, false, error); } finally { try { - if (useWorkerVm == true) { + if (useWorkerVm) { s_logger.info("Destroy worker VM after volume resize"); + vmMo.detachDisk(vmdkDataStorePath, false); vmMo.destroy(); } @@ -2190,9 +2236,9 @@ protected StartAnswer execute(StartCommand cmd) { vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask)); postNvpConfigBeforeStart(vmMo, vmSpec); - Map iqnToPath = new HashMap(); + Map> iqnToData = new HashMap<>(); - postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToPath, hyperHost, context); + postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToData, hyperHost, context); // // Power-on VM @@ -2203,7 +2249,7 @@ protected StartAnswer execute(StartCommand cmd) { StartAnswer startAnswer = new StartAnswer(cmd); - startAnswer.setIqnToPath(iqnToPath); + startAnswer.setIqnToData(iqnToData); // Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it. 
if (existingVmName != null && existingVmFileLayout != null) { @@ -2460,11 +2506,21 @@ int getReservedCpuMHZ(VirtualMachineTO vmSpec) { final String datastoreDiskPath; if (isManaged) { + String vmdkPath = new DatastoreFile(volumeTO.getPath()).getFileBaseName(); + if (volumeTO.getVolumeType() == Volume.Type.ROOT) { - datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getName(), VmwareManager.s_vmwareSearchExcludeFolder.value()); + if (vmdkPath == null) { + vmdkPath = volumeTO.getName(); + } + + datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, vmdkPath); } else { - datastoreDiskPath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + if (vmdkPath == null) { + vmdkPath = dsMo.getName(); + } + + datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + ".vmdk"); } } else { datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value()); @@ -2822,8 +2878,8 @@ private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo m } } - private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, int scsiControllerKey, - Map iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { + private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, + int scsiControllerKey, Map> iqnToData, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); for (DiskTO vol : sortedDisks) { @@ -2862,10 +2918,18 @@ private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO v } VolumeObjectTO volInSpec = getVolumeInSpec(vmSpec, volumeTO); + if (volInSpec != null) { if (managed) { + Map data = new HashMap<>(); + String datastoreVolumePath = diskChain[0]; - iqnToPath.put(details.get(DiskTO.IQN), datastoreVolumePath); + + data.put(StartAnswer.PATH, datastoreVolumePath); + data.put(StartAnswer.IMAGE_FORMAT, Storage.ImageFormat.OVA.toString()); + + iqnToData.put(details.get(DiskTO.IQN), data); + vol.setPath(datastoreVolumePath); volumeTO.setPath(datastoreVolumePath); volInSpec.setPath(datastoreVolumePath); @@ -2972,9 +3036,39 @@ public int compare(DiskTO arg0, DiskTO arg1) { return listForSort.toArray(new DiskTO[0]); } - private HashMap> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, DiskTO[] disks, - Command cmd) throws Exception { - HashMap> mapIdToMors = new HashMap>(); + /** + * Only call this for managed storage. + * Ex. 
"[-iqn.2010-01.com.solidfire:4nhe.vol-1.27-0] i-2-18-VM/ROOT-18.vmdk" should return "i-2-18-VM/ROOT-18" + */ + public String getVmdkPath(String path) { + if (!com.cloud.utils.StringUtils.isNotBlank(path)) { + return null; + } + + final String search = "]"; + + int startIndex = path.indexOf(search); + + if (startIndex == -1) { + return null; + } + + path = path.substring(startIndex + search.length()); + + final String search2 = ".vmdk"; + + int endIndex = path.indexOf(search2); + + if (endIndex == -1) { + return null; + } + + return path.substring(0, endIndex).trim(); + } + + private HashMap> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, + DiskTO[] disks, Command cmd) throws Exception { + HashMap> mapIdToMors = new HashMap<>(); assert (hyperHost != null) && (context != null); @@ -3000,20 +3094,33 @@ public int compare(DiskTO arg0, DiskTO arg1) { // if the datastore is not present, we need to discover the iSCSI device that will support it, // create the datastore, and create a VMDK file in the datastore if (morDatastore == null) { - morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, details.get(DiskTO.STORAGE_HOST), - Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null, - details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), details.get(DiskTO.CHAP_TARGET_USERNAME), - details.get(DiskTO.CHAP_TARGET_SECRET), Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd); + final String vmdkPath = getVmdkPath(volumeTO.getPath()); + + morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, + details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), + vmdkPath, + details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), + details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET), + Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd); DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore); - String datastoreVolumePath = dsMo.getDatastorePath((volumeTO.getVolumeType() == Volume.Type.ROOT ? 
volumeTO.getName() : dsMo.getName()) + ".vmdk"); + + final String datastoreVolumePath; + + if (vmdkPath != null) { + datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + ".vmdk"); + } + else { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } volumeTO.setPath(datastoreVolumePath); vol.setPath(datastoreVolumePath); } - mapIdToMors.put(datastoreName, new Pair(morDatastore, new DatastoreMO(context, morDatastore))); - } else { + mapIdToMors.put(datastoreName, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore))); + } + else { ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid); if (morDatastore == null) { @@ -3024,7 +3131,7 @@ public int compare(DiskTO arg0, DiskTO arg1) { throw new Exception(msg); } - mapIdToMors.put(poolUuid, new Pair(morDatastore, new DatastoreMO(context, morDatastore))); + mapIdToMors.put(poolUuid, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore))); } } } @@ -4100,9 +4207,35 @@ protected Answer execute(CreateStoragePoolCommand cmd) { } protected Answer execute(ModifyTargetsCommand cmd) { - VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); + VmwareContext context = getServiceContext(cmd); + VmwareHypervisorHost hyperHost = getHyperHost(context); + + List hostMOs = new ArrayList<>(); + + if (cmd.getApplyToAllHostsInCluster()) { + try { + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + ClusterMO clusterMO = new ClusterMO(context, morCluster); + + List> hosts = clusterMO.getClusterHosts(); + + for (Pair host : hosts) { + HostMO hostMO = new HostMO(context, host.first()); - handleTargets(cmd.getAdd(), cmd.getTargets(), (HostMO)hyperHost); + hostMOs.add(hostMO); + } + } + catch (Exception ex) { + s_logger.error(ex.getMessage(), ex); + + throw new CloudRuntimeException(ex.getMessage(), ex); + } + } + else { + hostMOs.add((HostMO)hyperHost); + } + + handleTargets(cmd.getAdd(), cmd.getTargetTypeToRemove(), cmd.isRemoveAsync(), cmd.getTargets(), hostMOs); return new ModifyTargetsAnswer(); } @@ -4133,7 +4266,7 @@ protected Answer execute(ModifyStoragePoolCommand cmd) { long capacity = summary.getCapacity(); long available = summary.getFreeSpace(); - Map tInfo = new HashMap(); + Map tInfo = new HashMap<>(); ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, capacity, available, tInfo); if (cmd.getAdd() && pool.getType() == StoragePoolType.VMFS) { @@ -4156,11 +4289,13 @@ protected Answer execute(ModifyStoragePoolCommand cmd) { } } - private void handleTargets(boolean add, List> targets, HostMO host) { + private void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove, boolean isRemoveAsync, + List> targets, List hosts) { if (targets != null && targets.size() > 0) { try { - _storageProcessor.handleTargetsForHost(add, targets, host); - } catch (Exception ex) { + _storageProcessor.handleTargets(add, targetTypeToRemove, isRemoveAsync, targets, hosts); + } + catch (Exception ex) { s_logger.warn(ex.getMessage()); } } @@ -4173,8 +4308,9 @@ protected Answer execute(DeleteStoragePoolCommand cmd) { try { if (cmd.getRemoveDatastore()) { - _storageProcessor.handleDatastoreAndVmdkDetach(cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(DeleteStoragePoolCommand.IQN), - cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT))); + 
_storageProcessor.handleDatastoreAndVmdkDetach(cmd, cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), + cmd.getDetails().get(DeleteStoragePoolCommand.IQN), cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), + Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT))); return new Answer(cmd, true, "success"); } else { @@ -4204,6 +4340,10 @@ public static String getDatastoreName(String str) { return str.replace('/', '-'); } + public static String createDatastoreNameFromIqn(String iqn) { + return "-" + iqn + "-0"; + } + protected AttachIsoAnswer execute(AttachIsoCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource AttachIsoCommand: " + _gson.toJson(cmd)); @@ -5119,7 +5259,11 @@ private String getIqn() { for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) { if (hba instanceof HostInternetScsiHba) { - return ((HostInternetScsiHba)hba).getIScsiName(); + HostInternetScsiHba hostInternetScsiHba = (HostInternetScsiHba)hba; + + if (hostInternetScsiHba.isIsSoftwareBased()) { + return ((HostInternetScsiHba)hba).getIScsiName(); + } } } } @@ -5991,7 +6135,7 @@ protected VirtualMachineMO findVmOnDatacenter(VmwareContext context, VmwareHyper return dcMo.findVm(vol.getPath()); } - private String getAbsoluteVmdkFile(VirtualDisk disk) { + public String getAbsoluteVmdkFile(VirtualDisk disk) { String vmdkAbsFile = null; VirtualDeviceBackingInfo backingInfo = disk.getBacking(); if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 9cbc7a74fbc..0cea62f18fa 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -39,19 +40,32 @@ import com.google.common.base.Strings; import com.google.gson.Gson; +import com.vmware.vim25.DatastoreHostMount; import com.vmware.vim25.HostHostBusAdapter; import com.vmware.vim25.HostInternetScsiHba; import com.vmware.vim25.HostInternetScsiHbaAuthenticationProperties; +import com.vmware.vim25.HostInternetScsiHbaSendTarget; import com.vmware.vim25.HostInternetScsiHbaStaticTarget; import com.vmware.vim25.HostInternetScsiTargetTransport; +import com.vmware.vim25.HostResignatureRescanResult; +import com.vmware.vim25.HostUnresolvedVmfsResignatureSpec; import com.vmware.vim25.HostScsiDisk; import com.vmware.vim25.HostScsiTopology; import com.vmware.vim25.HostScsiTopologyInterface; import com.vmware.vim25.HostScsiTopologyLun; import com.vmware.vim25.HostScsiTopologyTarget; +import com.vmware.vim25.HostUnresolvedVmfsExtent; +import com.vmware.vim25.HostUnresolvedVmfsVolume; +import com.vmware.vim25.InvalidStateFaultMsg; import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.VirtualDeviceBackingInfo; +import com.vmware.vim25.VirtualDeviceConfigSpec; +import com.vmware.vim25.VirtualDeviceConfigSpecOperation; +import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualDisk; import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo; +import com.vmware.vim25.VmfsDatastoreExpandSpec; +import 
com.vmware.vim25.VmfsDatastoreOption; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; @@ -141,9 +155,10 @@ public String getName() { private final Gson _gson; private final StorageLayer _storage = new JavaStorageLayer(); private Integer _nfsVersion; + private static final Random RANDOM = new Random(System.nanoTime()); public VmwareStorageProcessor(VmwareHostService hostService, boolean fullCloneFlag, VmwareStorageMount mountService, Integer timeout, VmwareResource resource, - Integer shutdownWaitMs, PremiumSecondaryStorageResource storageResource, Integer nfsVersion) { + Integer shutdownWaitMs, PremiumSecondaryStorageResource storageResource, Integer nfsVersion) { this.hostService = hostService; _fullCloneFlag = fullCloneFlag; this.mountService = mountService; @@ -163,9 +178,288 @@ public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) { @Override public ResignatureAnswer resignature(ResignatureCommand cmd) { - s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for VmwareStorageProcessor"); + final Map details = cmd.getDetails(); - return new ResignatureAnswer(); + String scsiNaaDeviceId = details.get(DiskTO.SCSI_NAA_DEVICE_ID); + + if (scsiNaaDeviceId == null || scsiNaaDeviceId.trim().length() == 0) { + throw new CloudRuntimeException("The 'scsiNaaDeviceId' needs to be specified when resignaturing a VMware datastore."); + } + + final String iScsiName = details.get(DiskTO.IQN); + final String datastoreName = getMaximumDatastoreName(VmwareResource.getDatastoreName(iScsiName)); + + String vmdk = null; + + try { + VmwareContext context = hostService.getServiceContext(null); + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); + + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + ClusterMO clusterMO = new ClusterMO(context, morCluster); + + List> lstHosts = clusterMO.getClusterHosts(); + + // add iSCSI connection to host + + final String storageHost = details.get(DiskTO.STORAGE_HOST); + final int storagePortNumber = Integer.parseInt(details.get(DiskTO.STORAGE_PORT)); + final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME); + final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET); + final String chapTargetUsername = details.get(DiskTO.CHAP_TARGET_USERNAME); + final String chapTargetSecret = details.get(DiskTO.CHAP_TARGET_SECRET); + + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(context, storageHost, lstHosts); + List hostsUsingStaticDiscovery = hostDiscoveryMethod.getHostsUsingStaticDiscovery(); + + if (hostsUsingStaticDiscovery != null && hostsUsingStaticDiscovery.size() > 0) { + List lstTargets = getTargets(storageHost, storagePortNumber, trimIqn(iScsiName), + chapInitiatorUsername, chapInitiatorSecret, chapTargetUsername, chapTargetSecret); + + addRemoveInternetScsiTargetsToAllHosts(true, lstTargets, hostsUsingStaticDiscovery); + } + + rescanAllHosts(context, lstHosts, true, true); + + // perform resignature operation + + HostMO hostMO = new HostMO(context, lstHosts.get(0).first()); + + HostDatastoreSystemMO hostDatastoreSystem = hostMO.getHostDatastoreSystemMO(); + + List hostUnresolvedVmfsVolumes = hostDatastoreSystem.queryUnresolvedVmfsVolumes(); + + if (hostUnresolvedVmfsVolumes == null || hostUnresolvedVmfsVolumes.size() == 0) { + throw new CloudRuntimeException("Unable to locate any snapshot datastores"); + } + + boolean foundExtent = false; + + for 
(HostUnresolvedVmfsVolume hostUnresolvedVmfsVolume : hostUnresolvedVmfsVolumes) { + List extents = hostUnresolvedVmfsVolume.getExtent(); + List matchingExtents = getExtentsMatching(extents, scsiNaaDeviceId); + + if (matchingExtents.size() >= 1) { + String extentDevicePath = matchingExtents.get(0).getDevicePath(); + HostResignatureRescanResult hostResignatureRescanResult = resignatureDatastore(hostDatastoreSystem, extentDevicePath); + + if (hostResignatureRescanResult == null) { + throw new CloudRuntimeException("'hostResignatureRescanResult' should not be 'null'."); + } + + ManagedObjectReference morDs = hostResignatureRescanResult.getResult(); + + if (morDs == null) { + throw new CloudRuntimeException("'morDs' should not be 'null'."); + } + + DatastoreMO datastoreMO = new DatastoreMO(context, morDs); + + boolean isOnlyForTemplate = Boolean.parseBoolean(details.get(DiskTO.TEMPLATE_RESIGN)); + + // If this is only for a template, all we really want to do is resignature the datastore (done at this point), + // then rename the datastore. + if (isOnlyForTemplate) { + vmdk = details.get(DiskTO.VMDK); + } + else { + vmdk = cleanUpDatastore(cmd, hostDatastoreSystem, datastoreMO, details); + } + + if (renameDatastore(context, morDs, datastoreName, lstHosts)) { + foundExtent = true; + + break; + } + } + } + + removeVmfsDatastore(cmd, hyperHost, datastoreName, storageHost, storagePortNumber, trimIqn(iScsiName), lstHosts); + + if (!foundExtent) { + throw new CloudRuntimeException("Unable to locate the applicable extent"); + } + + final ResignatureAnswer answer = new ResignatureAnswer(); + + final long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE)); + + answer.setSize(volumeSize); + + answer.setPath("[" + datastoreName + "] " + vmdk); + + answer.setFormat(ImageFormat.OVA); + + return answer; + } + catch (Exception ex) { + s_logger.debug(ex.getMessage()); + + throw new CloudRuntimeException(ex.getMessage()); + } + } + + private List getExtentsMatching(List extents, String naa) { + List matchingExtents = new ArrayList<>(); + + if (extents != null) { + for (HostUnresolvedVmfsExtent extent : extents) { + s_logger.debug("extent devicePath=" + extent.getDevicePath() + ", ordinal=" + extent.getOrdinal() + + ", reason=" + extent.getReason() + ", isHeadExtent=" + extent.isIsHeadExtent()); + + String extentDevicePath = extent.getDevicePath(); + + if (extentDevicePath.contains(naa)) { + matchingExtents.add(extent); + } + } + } + + return matchingExtents; + } + + private class HostUnresolvedVmfsResignatureSpecCustom extends HostUnresolvedVmfsResignatureSpec { + private HostUnresolvedVmfsResignatureSpecCustom(String extentDevicePath) { + this.extentDevicePath = new ArrayList<>(1); + + this.extentDevicePath.add(extentDevicePath); + } + } + + private HostResignatureRescanResult resignatureDatastore(HostDatastoreSystemMO hostDatastoreSystemMO, String extentDevicePath) throws Exception { + HostUnresolvedVmfsResignatureSpecCustom resignatureSpec = new HostUnresolvedVmfsResignatureSpecCustom(extentDevicePath); + + return hostDatastoreSystemMO.resignatureUnresolvedVmfsVolume(resignatureSpec); + } + + private boolean renameDatastore(VmwareContext context, ManagedObjectReference morDs, String newName, List> lstHosts) throws Exception { + if (morDs != null) { + DatastoreMO datastoreMO = new DatastoreMO(context, morDs); + + datastoreMO.renameDatastore(newName); + + waitForAllHostsToMountDatastore(lstHosts, datastoreMO); + + return true; + } + + s_logger.debug("Unable to locate datastore to rename"); + + return 
false; + } + + private String getMaximumDatastoreName(String datastoreName) { + final int maxDatastoreNameLength = 80; + + return datastoreName.length() > maxDatastoreNameLength ? datastoreName.substring(0, maxDatastoreNameLength) : datastoreName; + } + + /** + * 1) Possibly expand the datastore. + * 2) Possibly consolidate all relevant VMDK files into one VMDK file. + * 3) Possibly move the VMDK file to the root folder (may already be there). + * 4) If the VMDK file wasn't already in the root folder, then delete the folder the VMDK file was in. + * 5) Possibly rename the VMDK file (this will lead to there being a delta file with the new name and the + * original file with the original name). + * + * Note: If the underlying VMDK file was for a root disk, the 'vmdk' parameter's value might look, for example, + * like "i-2-32-VM/ROOT-32.vmdk". + * + * Note: If the underlying VMDK file was for a data disk, the 'vmdk' parameter's value might look, for example, + * like "-iqn.2010-01.com.solidfire:4nhe.data-32.79-0.vmdk". + * + * Returns the (potentially new) name of the VMDK file. + */ + private String cleanUpDatastore(Command cmd, HostDatastoreSystemMO hostDatastoreSystem, DatastoreMO dsMo, Map details) throws Exception { + boolean expandDatastore = Boolean.parseBoolean(details.get(DiskTO.EXPAND_DATASTORE)); + + // A volume on the storage system holding a template uses a minimum hypervisor snapshot reserve value. + // When this volume is cloned to a new volume, the new volume can be expanded (to take a new hypervisor snapshot reserve value + // into consideration). If expandDatastore is true, we want to expand the datastore in the new volume to the size of the cloned volume. + // It's possible that expandDatastore might be true and there isn't any extra space in the cloned volume (if the hypervisor snapshot + // reserve value in use is set to the minimum for the cloned volume), but that's fine. + if (expandDatastore) { + expandDatastore(hostDatastoreSystem, dsMo); + } + + String vmdk = details.get(DiskTO.VMDK); + String fullVmdkPath = new DatastoreFile(dsMo.getName(), vmdk).getPath(); + + VmwareContext context = hostService.getServiceContext(null); + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); + + DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); + + String vmName = getVmName(vmdk); + + // If vmName is not null, then move all VMDK files out of this folder to the root folder and then delete the folder named vmName. 
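+ // (Per the getVmName() examples below: a root-disk VMDK lives in an "i-x-y-VM" folder, so vmName is non-null for it; a data-disk VMDK sits in the datastore root, getVmName() returns null, and this block is skipped.) 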
+ if (vmName != null) { + String workerVmName = hostService.getWorkerName(context, cmd, 0); + + VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVmName); + + if (vmMo == null) { + throw new Exception("Unable to create a worker VM for volume creation"); + } + + vmMo.attachDisk(new String[] { fullVmdkPath }, dsMo.getMor()); + + List backingFiles = new ArrayList<>(1); + + List virtualDisks = vmMo.getVirtualDisks(); + + VirtualDisk virtualDisk = virtualDisks.get(0); + + VirtualDeviceBackingInfo virtualDeviceBackingInfo = virtualDisk.getBacking(); + + while (virtualDeviceBackingInfo instanceof VirtualDiskFlatVer2BackingInfo) { + VirtualDiskFlatVer2BackingInfo backingInfo = (VirtualDiskFlatVer2BackingInfo)virtualDeviceBackingInfo; + + backingFiles.add(backingInfo.getFileName()); + + virtualDeviceBackingInfo = backingInfo.getParent(); + } + + vmMo.detachAllDisks(); + vmMo.destroy(); + + VmwareStorageLayoutHelper.moveVolumeToRootFolder(dcMo, backingFiles); + + vmdk = new DatastoreFile(vmdk).getFileName(); + + // Delete the folder the VMDK file was in. + + DatastoreFile folderToDelete = new DatastoreFile(dsMo.getName(), vmName); + + dsMo.deleteFolder(folderToDelete.getPath(), dcMo.getMor()); + } + + return vmdk; + } + + /** + * Example input for the 'vmdk' parameter: + * i-2-32-VM/ROOT-32.vmdk + * -iqn.2010-01.com.solidfire:4nhe.data-32.79-0.vmdk + */ + private String getVmName(String vmdk) { + int indexOf = vmdk.indexOf("/"); + + if (indexOf == -1) { + return null; + } + + return vmdk.substring(0, indexOf).trim(); + } + + public void expandDatastore(HostDatastoreSystemMO hostDatastoreSystem, DatastoreMO datastoreMO) throws Exception { + List vmfsDatastoreOptions = hostDatastoreSystem.queryVmfsDatastoreExpandOptions(datastoreMO); + + if (vmfsDatastoreOptions != null && vmfsDatastoreOptions.size() > 0) { + VmfsDatastoreExpandSpec vmfsDatastoreExpandSpec = (VmfsDatastoreExpandSpec)vmfsDatastoreOptions.get(0).getSpec(); + + hostDatastoreSystem.expandVmfsDatastore(datastoreMO, vmfsDatastoreExpandSpec); + } } private String getOVFFilePath(String srcOVAFileName) { @@ -184,8 +478,8 @@ private String getOVFFilePath(String srcOVAFileName) { } private Pair copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, - String templatePathAtSecondaryStorage, String templateName, String templateUuid, boolean createSnapshot, Integer nfsVersion) throws Exception { - + String templatePathAtSecondaryStorage, String templateName, String templateUuid, + boolean createSnapshot, Integer nfsVersion) throws Exception { s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName); @@ -225,13 +519,13 @@ private String getOVFFilePath(String srcOVAFileName) { if (vmMo == null) { String msg = "Failed to import OVA template. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + - ", templateName: " + templateName + ", templateUuid: " + templateUuid; + ", templateName: " + templateName + ", templateUuid: " + templateUuid; s_logger.error(msg); throw new Exception(msg); } OVAProcessor processor = new OVAProcessor(); - Map params = new HashMap(); + Map params = new HashMap<>(); params.put(StorageLayer.InstanceConfigKey, _storage); processor.configure("OVA Processor", params); long virtualSize = processor.getTemplateVirtualSize(secondaryMountPoint + "/" + templatePathAtSecondaryStorage, templateName); @@ -253,7 +547,7 @@ private String getOVFFilePath(String srcOVAFileName) { } } - return new Pair(vmMo, new Long(virtualSize)); + return new Pair<>(vmMo, virtualSize); } @Override @@ -273,7 +567,7 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { String secondaryStorageUrl = nfsImageStore.getUrl(); - assert (secondaryStorageUrl != null); + assert secondaryStorageUrl != null; boolean managed = false; String storageHost = null; @@ -317,17 +611,19 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { Pair templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName()); VmwareContext context = hostService.getServiceContext(cmd); + if (context == null) { - return new CopyCmdAnswer("Failed to create a Vmware context, check the management server logs or the ssvm log for details"); + return new CopyCmdAnswer("Failed to create a VMware context, check the management server logs or the SSVM log for details"); } + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); + DatastoreMO dsMo = null; + try { - VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); String storageUuid = managed ? managedStoragePoolName : primaryStore.getUuid(); String templateUuidName = deriveTemplateUuidOnHost(hyperHost, storageUuid, templateInfo.second()); DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true); - DatastoreMO dsMo = null; Pair vmInfo = null; if (templateMo == null) { @@ -378,14 +674,16 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { TemplateObjectTO newTemplate = new TemplateObjectTO(); if (managed) { - if(dsMo != null) { + if (dsMo != null) { String path = dsMo.getDatastorePath(managedStoragePoolRootVolumeName + ".vmdk"); + newTemplate.setPath(path); } } else { newTemplate.setPath(templateUuidName); } + newTemplate.setSize((vmInfo != null)? 
vmInfo.second() : new Long(0)); return new CopyCmdAnswer(newTemplate); @@ -400,11 +698,28 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { return new CopyCmdAnswer(msg); } + finally { + if (dsMo != null && managedStoragePoolName != null) { + try { + removeVmfsDatastore(cmd, hyperHost, VmwareResource.getDatastoreName(managedStoragePoolName), storageHost, storagePort, trimIqn(managedStoragePoolName)); + } + catch (Exception ex) { + s_logger.error("Unable to remove the following datastore: " + VmwareResource.getDatastoreName(managedStoragePoolName), ex); + } + } + } + } + + private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, String vmdkName, ManagedObjectReference morDatastore, + ManagedObjectReference morPool) throws Exception { + return createVMLinkedClone(vmTemplate, dcMo, vmdkName, morDatastore, morPool, null); } - private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkName, ManagedObjectReference morDatastore, - ManagedObjectReference morPool) throws Exception { - ManagedObjectReference morBaseSnapshot = vmTemplate.getSnapshotMor("cloud.template.base"); + private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, String vmdkName, ManagedObjectReference morDatastore, + ManagedObjectReference morPool, ManagedObjectReference morBaseSnapshot) throws Exception { + if (morBaseSnapshot == null) { + morBaseSnapshot = vmTemplate.getSnapshotMor("cloud.template.base"); + } if (morBaseSnapshot == null) { String msg = "Unable to find template base snapshot, invalid template"; @@ -428,7 +743,7 @@ private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dc } private boolean createVMFullClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkName, ManagedObjectReference morDatastore, - ManagedObjectReference morPool) throws Exception { + ManagedObjectReference morPool) throws Exception { s_logger.info("creating full clone from template"); if (!vmTemplate.createFullClone(vmdkName, dcMo.getVmFolder(), morPool, morDatastore)) { @@ -465,7 +780,7 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { DatastoreMO dsMo = new DatastoreMO(context, morDatastore); String vmdkName = volume.getName(); - String vmdkFileBaseName = null; + String vmdkFileBaseName; if (srcStore == null) { // create a root volume for blank VM (created from ISO) String dummyVmName = hostService.getWorkerName(context, cmd, 0); @@ -508,7 +823,7 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { _fullCloneFlag = volume.getSize() > template.getSize() ? 
true : _fullCloneFlag; } if (!_fullCloneFlag) { - createVMLinkedClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); + createVMLinkedClone(vmTemplate, dcMo, vmdkName, morDatastore, morPool); } else { createVMFullClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); } @@ -568,10 +883,10 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { } } - private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, String srcVolumePath, DatastoreMO dsMo, String secStorageUrl, long wait, Integer nfsVersion) throws Exception { - - String volumeFolder = null; - String volumeName = null; + private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, String srcVolumePath, DatastoreMO dsMo, String secStorageUrl, + long wait, Integer nfsVersion) throws Exception { + String volumeFolder; + String volumeName; String sufix = ".ova"; int index = srcVolumePath.lastIndexOf(File.separator); if (srcVolumePath.endsWith(sufix)) { @@ -585,7 +900,7 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { String newVolume = VmwareHelper.getVCenterSafeUuid(); restoreVolumeFromSecStorage(hyperHost, dsMo, newVolume, secStorageUrl, volumeFolder, volumeName, wait, nfsVersion); - return new Pair(volumeFolder, newVolume); + return new Pair<>(volumeFolder, newVolume); } private String deleteVolumeDirOnSecondaryStorage(String volumeDir, String secStorageUrl, Integer nfsVersion) throws Exception { @@ -652,7 +967,7 @@ private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName, } private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyCommand cmd, String vmName, String poolId, - String volumePath, String destVolumePath, String secStorageUrl, String workerVmName) throws Exception { + String volumePath, String destVolumePath, String secStorageUrl, String workerVmName) throws Exception { VirtualMachineMO workerVm = null; VirtualMachineMO vmMo = null; String exportName = UUID.randomUUID().toString().replace("-", ""); @@ -688,7 +1003,7 @@ private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName, vmMo.createSnapshot(exportName, "Temporary snapshot for copy-volume command", false, false); exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, destVolumePath, exportName, hostService.getWorkerName(hyperHost.getContext(), cmd, 1), _nfsVersion); - return new Pair(destVolumePath, exportName); + return new Pair<>(destVolumePath, exportName); } finally { vmMo.removeSnapshot(exportName, false); @@ -705,7 +1020,6 @@ public Answer copyVolumeFromPrimaryToSecondary(CopyCommand cmd) { VolumeObjectTO srcVolume = (VolumeObjectTO)cmd.getSrcTO(); VolumeObjectTO destVolume = (VolumeObjectTO)cmd.getDestTO(); String vmName = srcVolume.getVmName(); - String searchExcludedFolders = cmd.getContextParam("searchexludefolders"); VmwareContext context = hostService.getServiceContext(cmd); try { @@ -772,7 +1086,7 @@ private void postCreatePrivateTemplate(String installFullPath, long templateId, } private Ternary createTemplateFromVolume(VirtualMachineMO vmMo, String installPath, long templateId, String templateUniqueName, - String secStorageUrl, String volumePath, String workerVmName, Integer nfsVersion) throws Exception { + String secStorageUrl, String volumePath, String workerVmName, Integer nfsVersion) throws Exception { String secondaryMountPoint = mountService.getMountPoint(secStorageUrl, nfsVersion); String installFullPath = secondaryMountPoint + "/" + installPath; @@ -827,7 +1141,7 @@ private void 
postCreatePrivateTemplate(String installFullPath, long templateId, long physicalSize = new File(installFullPath + "/" + templateVMDKName).length(); OVAProcessor processor = new OVAProcessor(); - Map params = new HashMap(); + Map params = new HashMap<>(); params.put(StorageLayer.InstanceConfigKey, _storage); processor.configure("OVA Processor", params); long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); @@ -937,7 +1251,7 @@ private void writeMetaOvaForTemplate(String installFullPath, String ovfFilename, } private Ternary createTemplateFromSnapshot(String installPath, String templateUniqueName, String secStorageUrl, String snapshotPath, - Long templateId, long wait, Integer nfsVersion) throws Exception { + Long templateId, long wait, Integer nfsVersion) throws Exception { //Snapshot path is decoded in this form: /snapshots/account/volumeId/uuid/uuid String backupSSUuid; String snapshotFolder; @@ -1050,154 +1364,394 @@ private void writeMetaOvaForTemplate(String installFullPath, String ovfFilename, } } - long physicalSize = new File(installFullPath + "/" + templateVMDKName).length(); - OVAProcessor processor = new OVAProcessor(); - // long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); - Map params = new HashMap(); - params.put(StorageLayer.InstanceConfigKey, _storage); - processor.configure("OVA Processor", params); - long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); + Size size = handleMetadataCreateTemplateFromSnapshot(installFullPath, templateVMDKName, templateId, templateUniqueName, backupSSUuid); - postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); - writeMetaOvaForTemplate(installFullPath, backupSSUuid + ".ovf", templateVMDKName, templateUniqueName, physicalSize); - return new Ternary(installPath + "/" + templateUniqueName + ".ova", physicalSize, virtualSize); + return new Ternary<>(installPath + "/" + templateUniqueName + ".ova", size.getPhysicalSize(), size.getVirtualSize()); } finally { // TODO, clean up left over files } } - @Override - public Answer createTemplateFromSnapshot(CopyCommand cmd) { - SnapshotObjectTO snapshot = (SnapshotObjectTO)cmd.getSrcTO(); - TemplateObjectTO template = (TemplateObjectTO)cmd.getDestTO(); - DataStoreTO imageStore = template.getDataStore(); - String details; - String uniqeName = UUID.randomUUID().toString(); - - VmwareContext context = hostService.getServiceContext(cmd); - try { - if (!(imageStore instanceof NfsTO)) { - return new CopyCmdAnswer("Only support create template from snapshot, when the dest store is nfs"); - } - - NfsTO nfsSvr = (NfsTO)imageStore; - Ternary result = createTemplateFromSnapshot(template.getPath(), uniqeName, nfsSvr.getUrl(), snapshot.getPath(), template.getId(), (long)cmd.getWait() * 1000, _nfsVersion); + private class Size { + private final long _physicalSize; + private final long _virtualSize; - TemplateObjectTO newTemplate = new TemplateObjectTO(); - newTemplate.setPath(result.first()); - newTemplate.setPhysicalSize(result.second()); - newTemplate.setSize(result.third()); - newTemplate.setFormat(ImageFormat.OVA); - newTemplate.setName(uniqeName); - return new CopyCmdAnswer(newTemplate); - } catch (Throwable e) { - if (e instanceof RemoteException) { - hostService.invalidateServiceContext(context); - } + Size(long physicalSize, long virtualSize) { + _physicalSize = physicalSize; + _virtualSize = virtualSize; + } - s_logger.error("Unexpecpted exception 
", e); + long getPhysicalSize() { + return _physicalSize; + } - details = "create template from snapshot exception: " + VmwareHelper.getExceptionMessage(e); - return new CopyCmdAnswer(details); + long getVirtualSize() { + return _virtualSize; } } - // return Pair - private Pair exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, - String exportName, String workerVmName, Integer nfsVersion) throws Exception { + private Size handleMetadataCreateTemplateFromSnapshot(String installFullPath, String templateVMDKName, long templateId, String templateUniqueName, + String ovfFilename) throws Exception { + long physicalSize = new File(installFullPath + "/" + templateVMDKName).length(); - String secondaryMountPoint = mountService.getMountPoint(secStorageUrl, nfsVersion); - String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName; + OVAProcessor processor = new OVAProcessor(); - synchronized (exportPath.intern()) { - if (!new File(exportPath).exists()) { - Script command = new Script(false, "mkdir", _timeout, s_logger); - command.add("-p"); - command.add(exportPath); - if (command.execute() != null) { - throw new Exception("unable to prepare snapshot backup directory"); - } - } - } + Map params = new HashMap<>(); - VirtualMachineMO clonedVm = null; - try { + params.put(StorageLayer.InstanceConfigKey, _storage); - Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath); - if (volumeDeviceInfo == null) { - String msg = "Unable to find related disk device for volume. volume path: " + volumePath; - s_logger.error(msg); - throw new Exception(msg); - } + processor.configure("OVA Processor", params); - // 4 MB is the minimum requirement for VM memory in VMware - Pair cloneResult = - vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); - clonedVm = cloneResult.first(); - String disks[] = cloneResult.second(); + long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); - clonedVm.exportVm(exportPath, exportName, false, false); - return new Pair(volumeDeviceInfo.second(), disks); - } finally { - if (clonedVm != null) { - clonedVm.detachAllDisks(); - clonedVm.destroy(); - } - } - } + postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); - // Ternary - private Ternary backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, String installPath, String volumePath, String snapshotUuid, - String secStorageUrl, String prevSnapshotUuid, String prevBackupUuid, String workerVmName, Integer nfsVersion) throws Exception { + writeMetaOvaForTemplate(installFullPath, ovfFilename + ".ovf", templateVMDKName, templateUniqueName, physicalSize); - String backupUuid = UUID.randomUUID().toString(); - Pair snapshotInfo = exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, installPath, backupUuid, workerVmName, nfsVersion); - return new Ternary(backupUuid, snapshotInfo.first(), snapshotInfo.second()); + return new Size(physicalSize, virtualSize); } - @Override - public Answer backupSnapshot(CopyCommand cmd) { - SnapshotObjectTO srcSnapshot = (SnapshotObjectTO)cmd.getSrcTO(); - DataStoreTO primaryStore = srcSnapshot.getDataStore(); - SnapshotObjectTO destSnapshot = (SnapshotObjectTO)cmd.getDestTO(); - DataStoreTO destStore = destSnapshot.getDataStore(); - if (!(destStore instanceof NfsTO)) { - return new CopyCmdAnswer("unsupported protocol"); - } + private void 
setUpManagedStorageCopyTemplateFromSnapshot(CopyCommand cmd) throws Exception { + VmwareContext context = hostService.getServiceContext(cmd); + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - NfsTO destNfsStore = (NfsTO)destStore; + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + ClusterMO clusterMO = new ClusterMO(context, morCluster); - String secondaryStorageUrl = destNfsStore.getUrl(); - String snapshotUuid = srcSnapshot.getPath(); - String prevSnapshotUuid = srcSnapshot.getParentSnapshotPath(); - String prevBackupUuid = destSnapshot.getParentSnapshotPath(); - VirtualMachineMO workerVm = null; - String workerVMName = null; - String volumePath = srcSnapshot.getVolume().getPath(); - ManagedObjectReference morDs = null; - DatastoreMO dsMo = null; + List> lstHosts = clusterMO.getClusterHosts(); - // By default assume failure - String details = null; - boolean success = false; - String snapshotBackupUuid = null; + final Map options = cmd.getOptions(); - boolean hasOwnerVm = false; - Ternary backupResult = null; + final String storageHost = options.get(DiskTO.STORAGE_HOST); + final int storagePortNumber = Integer.parseInt(options.get(DiskTO.STORAGE_PORT)); + final String iScsiName = options.get(DiskTO.IQN); + final String snapshotPath = options.get(DiskTO.VMDK); + final String chapInitiatorUsername = options.get(DiskTO.CHAP_INITIATOR_USERNAME); + final String chapInitiatorSecret = options.get(DiskTO.CHAP_INITIATOR_SECRET); + final String chapTargetUsername = options.get(DiskTO.CHAP_TARGET_USERNAME); + final String chapTargetSecret = options.get(DiskTO.CHAP_TARGET_SECRET); - VmwareContext context = hostService.getServiceContext(cmd); - VirtualMachineMO vmMo = null; - String vmName = srcSnapshot.getVmName(); - try { - VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid()); + String datastoreName = getManagedDatastoreNameFromPath(snapshotPath); - CopyCmdAnswer answer = null; + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(context, storageHost, lstHosts); + List hostsUsingStaticDiscovery = hostDiscoveryMethod.getHostsUsingStaticDiscovery(); - try { - if(vmName != null) { - vmMo = hyperHost.findVmOnHyperHost(vmName); - if (vmMo == null) { + if (hostsUsingStaticDiscovery != null && hostsUsingStaticDiscovery.size() > 0) { + final List lstTargets = getTargets(storageHost, storagePortNumber, trimIqn(iScsiName), + chapInitiatorUsername, chapInitiatorSecret, chapTargetUsername, chapTargetSecret); + + addRemoveInternetScsiTargetsToAllHosts(true, lstTargets, hostsUsingStaticDiscovery); + } + + rescanAllHosts(context, lstHosts, true, true); + + Pair firstHost = lstHosts.get(0); + HostMO firstHostMO = new HostMO(context, firstHost.first()); + HostDatastoreSystemMO firstHostDatastoreSystemMO = firstHostMO.getHostDatastoreSystemMO(); + ManagedObjectReference morDs = firstHostDatastoreSystemMO.findDatastoreByName(datastoreName); + DatastoreMO datastoreMO = new DatastoreMO(context, morDs); + + mountVmfsDatastore(datastoreMO, lstHosts); + } + + private void takeDownManagedStorageCopyTemplateFromSnapshot(CopyCommand cmd) throws Exception { + VmwareContext context = hostService.getServiceContext(cmd); + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); + + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + ClusterMO clusterMO = new ClusterMO(context, morCluster); + + List> lstHosts 
= clusterMO.getClusterHosts(); + + final Map options = cmd.getOptions(); + + final String storageHost = options.get(DiskTO.STORAGE_HOST); + final int storagePortNumber = Integer.parseInt(options.get(DiskTO.STORAGE_PORT)); + final String iScsiName = options.get(DiskTO.IQN); + final String snapshotPath = options.get(DiskTO.VMDK); + + String datastoreName = getManagedDatastoreNameFromPath(snapshotPath); + + unmountVmfsDatastore(context, hyperHost, datastoreName, lstHosts); + + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(context, storageHost, lstHosts); + List hostsUsingStaticDiscovery = hostDiscoveryMethod.getHostsUsingStaticDiscovery(); + + if (hostsUsingStaticDiscovery != null && hostsUsingStaticDiscovery.size() > 0) { + final List lstTargets = getTargets(storageHost, storagePortNumber, trimIqn(iScsiName), + null, null, null, null); + + addRemoveInternetScsiTargetsToAllHosts(false, lstTargets, hostsUsingStaticDiscovery); + + rescanAllHosts(context, lstHosts, true, false); + } + } + + private void createTemplateFolder(String installPath, String installFullPath, NfsTO nfsSvr) { + synchronized (installPath.intern()) { + Script command = new Script(false, "mkdir", _timeout, s_logger); + + command.add("-p"); + command.add(installFullPath); + + String result = command.execute(); + + if (result != null) { + String secStorageUrl = nfsSvr.getUrl(); + String msg = "unable to prepare template directory: " + installPath + "; storage: " + secStorageUrl + "; error msg: " + result; + + s_logger.error(msg); + + throw new CloudRuntimeException(msg); + } + } + } + + private void exportManagedStorageSnapshotToTemplate(CopyCommand cmd, String installFullPath, String snapshotPath, String exportName) throws Exception { + DatastoreFile dsFile = new DatastoreFile(snapshotPath); + + VmwareContext context = hostService.getServiceContext(cmd); + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); + + String workerVMName = hostService.getWorkerName(context, cmd, 0); + + ManagedObjectReference dsMor = hyperHost.findDatastoreByName(dsFile.getDatastoreName()); + DatastoreMO dsMo = new DatastoreMO(context, dsMor); + + VirtualMachineMO workerVM = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName); + + if (workerVM == null) { + throw new CloudRuntimeException("Failed to find the newly created worker VM: " + workerVMName); + } + + workerVM.attachDisk(new String[]{snapshotPath}, dsMor); + + workerVM.exportVm(installFullPath, exportName, false, false); + + workerVM.detachAllDisks(); + workerVM.destroy(); + } + + private String getTemplateVmdkName(String installFullPath, String exportName) { + File templateDir = new File(installFullPath); + File[] templateFiles = templateDir.listFiles(); + + if (templateFiles == null) { + String msg = "Unable to find template files in " + installFullPath; + + s_logger.error(msg); + + throw new CloudRuntimeException(msg); + } + + for (int i = 0; i < templateFiles.length; i++) { + String templateFile = templateFiles[i].getName(); + + if (templateFile.toLowerCase().startsWith(exportName) && templateFile.toLowerCase().endsWith(".vmdk")) { + return templateFile; + } + } + + throw new CloudRuntimeException("Unable to locate the template VMDK file"); + } + + private Answer handleManagedStorageCreateTemplateFromSnapshot(CopyCommand cmd, TemplateObjectTO template, NfsTO nfsSvr) { + try { + setUpManagedStorageCopyTemplateFromSnapshot(cmd); + + final Map options = cmd.getOptions(); + + String snapshotPath = options.get(DiskTO.VMDK); + + String 
secondaryMountPoint = mountService.getMountPoint(nfsSvr.getUrl(), _nfsVersion); + String installPath = template.getPath(); + String installFullPath = secondaryMountPoint + "/" + installPath; + + createTemplateFolder(installPath, installFullPath, nfsSvr); + + String exportName = UUID.randomUUID().toString(); + + exportManagedStorageSnapshotToTemplate(cmd, installFullPath, snapshotPath, exportName); + + String templateVmdkName = getTemplateVmdkName(installFullPath, exportName); + + String uniqueName = options.get(DiskTO.UUID); + + Size size = handleMetadataCreateTemplateFromSnapshot(installFullPath, templateVmdkName, template.getId(), uniqueName, exportName); + + TemplateObjectTO newTemplate = new TemplateObjectTO(); + + newTemplate.setPath(installPath + "/" + uniqueName + ".ova"); + newTemplate.setPhysicalSize(size.getPhysicalSize()); + newTemplate.setSize(size.getVirtualSize()); + newTemplate.setFormat(ImageFormat.OVA); + newTemplate.setName(uniqueName); + + return new CopyCmdAnswer(newTemplate); + } + catch (Exception ex) { + String errMsg = "Problem creating a template from a snapshot for managed storage: " + ex.getMessage(); + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg, ex); + } + finally { + try { + takeDownManagedStorageCopyTemplateFromSnapshot(cmd); + } + catch (Exception ex) { + s_logger.warn("Unable to remove one or more static targets"); + } + } + } + + @Override + public Answer createTemplateFromSnapshot(CopyCommand cmd) { + String details; + + SnapshotObjectTO snapshot = (SnapshotObjectTO)cmd.getSrcTO(); + TemplateObjectTO template = (TemplateObjectTO)cmd.getDestTO(); + + DataStoreTO imageStore = template.getDataStore(); + + String uniqueName = UUID.randomUUID().toString(); + + VmwareContext context = hostService.getServiceContext(cmd); + + try { + if (!(imageStore instanceof NfsTO)) { + return new CopyCmdAnswer("Creating a template from a snapshot is only supported when the destination store is NFS."); + } + + NfsTO nfsSvr = (NfsTO)imageStore; + + if (snapshot.getDataStore() instanceof PrimaryDataStoreTO && template.getDataStore() instanceof NfsTO) { + return handleManagedStorageCreateTemplateFromSnapshot(cmd, template, nfsSvr); + } + + Ternary result = createTemplateFromSnapshot(template.getPath(), uniqueName, nfsSvr.getUrl(), snapshot.getPath(), template.getId(), + cmd.getWait() * 1000, _nfsVersion); + + TemplateObjectTO newTemplate = new TemplateObjectTO(); + + newTemplate.setPath(result.first()); + newTemplate.setPhysicalSize(result.second()); + newTemplate.setSize(result.third()); + newTemplate.setFormat(ImageFormat.OVA); + newTemplate.setName(uniqueName); + + return new CopyCmdAnswer(newTemplate); + } catch (Throwable e) { + if (e instanceof RemoteException) { + hostService.invalidateServiceContext(context); + } + + s_logger.error("Unexpected exception ", e); + + details = "create template from snapshot exception: " + VmwareHelper.getExceptionMessage(e); + + return new CopyCmdAnswer(details); + } + } + + // return Pair + private Pair exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, + String exportName, String workerVmName, Integer nfsVersion) throws Exception { + + String secondaryMountPoint = mountService.getMountPoint(secStorageUrl, nfsVersion); + String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName; + + synchronized (exportPath.intern()) { + if (!new File(exportPath).exists()) { + Script command = new Script(false, "mkdir", _timeout, s_logger); + 
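
The directory-preparation idiom used in createTemplateFolder above, and repeated here for the export path, serializes concurrent callers by synchronizing on the interned path string before creating the directory. A self-contained sketch of the same idiom using only the JDK follows; the class and method names are illustrative, and it substitutes Files.createDirectories for the patch's shell-out to "mkdir -p" via Script:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class ExportDirPrep {
        // Locking path.intern() yields one monitor per distinct path value, so two
        // threads preparing the same export path cannot race each other, while
        // unrelated paths proceed in parallel. Assumes all callers build the path
        // string the same way, so equal paths intern to the same instance.
        static void ensureDirectory(String path) throws IOException {
            synchronized (path.intern()) {
                Files.createDirectories(Paths.get(path)); // JDK equivalent of "mkdir -p"
            }
        }

        public static void main(String[] args) throws IOException {
            ensureDirectory("/tmp/secondary/snapshots/2/10"); // sample path, illustrative only
        }
    }
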
command.add("-p"); + command.add(exportPath); + if (command.execute() != null) { + throw new Exception("unable to prepare snapshot backup directory"); + } + } + } + + VirtualMachineMO clonedVm = null; + try { + + Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath); + if (volumeDeviceInfo == null) { + String msg = "Unable to find related disk device for volume. volume path: " + volumePath; + s_logger.error(msg); + throw new Exception(msg); + } + + // 4 MB is the minimum requirement for VM memory in VMware + Pair cloneResult = + vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); + clonedVm = cloneResult.first(); + String disks[] = cloneResult.second(); + + clonedVm.exportVm(exportPath, exportName, false, false); + return new Pair<>(volumeDeviceInfo.second(), disks); + } finally { + if (clonedVm != null) { + clonedVm.detachAllDisks(); + clonedVm.destroy(); + } + } + } + + // Ternary + private Ternary backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, String installPath, String volumePath, String snapshotUuid, + String secStorageUrl, String prevSnapshotUuid, String prevBackupUuid, String workerVmName, + Integer nfsVersion) throws Exception { + + String backupUuid = UUID.randomUUID().toString(); + Pair snapshotInfo = exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, installPath, backupUuid, workerVmName, nfsVersion); + return new Ternary<>(backupUuid, snapshotInfo.first(), snapshotInfo.second()); + } + + @Override + public Answer backupSnapshot(CopyCommand cmd) { + SnapshotObjectTO srcSnapshot = (SnapshotObjectTO)cmd.getSrcTO(); + DataStoreTO primaryStore = srcSnapshot.getDataStore(); + SnapshotObjectTO destSnapshot = (SnapshotObjectTO)cmd.getDestTO(); + DataStoreTO destStore = destSnapshot.getDataStore(); + if (!(destStore instanceof NfsTO)) { + return new CopyCmdAnswer("unsupported protocol"); + } + + NfsTO destNfsStore = (NfsTO)destStore; + + String secondaryStorageUrl = destNfsStore.getUrl(); + String snapshotUuid = srcSnapshot.getPath(); + String prevSnapshotUuid = srcSnapshot.getParentSnapshotPath(); + String prevBackupUuid = destSnapshot.getParentSnapshotPath(); + VirtualMachineMO workerVm = null; + String workerVMName = null; + String volumePath = srcSnapshot.getVolume().getPath(); + ManagedObjectReference morDs; + DatastoreMO dsMo; + + // By default assume failure + String details; + boolean success; + String snapshotBackupUuid; + + boolean hasOwnerVm = false; + Ternary backupResult = null; + + VmwareContext context = hostService.getServiceContext(cmd); + VirtualMachineMO vmMo = null; + String vmName = srcSnapshot.getVmName(); + try { + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); + morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid()); + + CopyCmdAnswer answer = null; + + try { + if(vmName != null) { + vmMo = hyperHost.findVmOnHyperHost(vmName); + if (vmMo == null) { if(s_logger.isDebugEnabled()) { s_logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); } @@ -1237,7 +1791,7 @@ public Answer backupSnapshot(CopyCommand cmd) { details = "Successfully backedUp the snapshot with Uuid: " + snapshotUuid + " to secondary storage."; // Get snapshot physical size - long physicalSize = 0l; + long physicalSize = 0; String secondaryMountPoint = mountService.getMountPoint(secondaryStorageUrl, _nfsVersion); String snapshotDir = 
destSnapshot.getPath() + "/" + snapshotBackupUuid; File[] files = new File(secondaryMountPoint + "/" + snapshotDir).listFiles(); @@ -1350,27 +1904,39 @@ public Answer attachVolume(AttachCommand cmd) { return this.attachVolume(cmd, cmd.getDisk(), true, isManaged, cmd.getVmName(), iScsiName, storageHost, storagePort, cmd.getControllerInfo()); } - private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean isManaged, String vmName, String iScsiName, String storageHost, int storagePort, Map controllerInfo) { + private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean isManaged, String vmName, String iScsiName, + String storageHost, int storagePort, Map controllerInfo) { VolumeObjectTO volumeTO = (VolumeObjectTO)disk.getData(); DataStoreTO primaryStore = volumeTO.getDataStore(); + + String vmdkPath = isManaged ? resource.getVmdkPath(volumeTO.getPath()) : null; + try { VmwareContext context = hostService.getServiceContext(null); VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); + if (vmMo == null) { - String msg = "Unable to find the VM to execute AttachCommand, vmName: " + vmName; - s_logger.error(msg); - throw new Exception(msg); + vmMo = hyperHost.findVmOnPeerHyperHost(vmName); + + if (vmMo == null) { + String msg = "Unable to find the VM to execute AttachCommand, vmName: " + vmName; + + s_logger.error(msg); + + throw new Exception(msg); + } } + vmName = vmMo.getName(); - ManagedObjectReference morDs = null; + ManagedObjectReference morDs; String diskUuid = volumeTO.getUuid().replace("-", ""); if (isAttach && isManaged) { Map details = disk.getDetails(); - morDs = prepareManagedStorage(context, hyperHost, diskUuid, iScsiName, storageHost, storagePort, null, + morDs = prepareManagedStorage(context, hyperHost, diskUuid, iScsiName, storageHost, storagePort, vmdkPath, details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET), volumeTO.getSize(), cmd); @@ -1394,13 +1960,13 @@ private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean if (isAttach) { if (isManaged) { - datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + datastoreVolumePath = dsMo.getDatastorePath((vmdkPath != null ? vmdkPath : dsMo.getName()) + ".vmdk"); } else { datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName, dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value()); } } else { if (isManaged) { - datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + datastoreVolumePath = dsMo.getDatastorePath((vmdkPath != null ? 
vmdkPath : dsMo.getName()) + ".vmdk"); } else { datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk"); @@ -1416,20 +1982,26 @@ private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean if (isAttach) { String diskController = getLegacyVmDataDiskController(); - if (controllerInfo != null && - !Strings.isNullOrEmpty(controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER))) { + + if (controllerInfo != null && !Strings.isNullOrEmpty(controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER))) { diskController = controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER); } + if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) { diskController = vmMo.getRecommendedDiskController(null); } - vmMo.attachDisk(new String[] {datastoreVolumePath}, morDs, diskController); + + vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs, diskController); + + if (isManaged) { + expandVirtualDisk(vmMo, datastoreVolumePath, volumeTO.getSize()); + } } else { vmMo.removeAllSnapshots(); vmMo.detachDisk(datastoreVolumePath, false); if (isManaged) { - handleDatastoreAndVmdkDetachManaged(diskUuid, iScsiName, storageHost, storagePort); + handleDatastoreAndVmdkDetachManaged(cmd, diskUuid, iScsiName, storageHost, storagePort); } else { VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath(), vmName, VmwareManager.s_vmwareSearchExcludeFolder.value()); } @@ -1439,19 +2011,66 @@ private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean } catch (Throwable e) { if (e instanceof RemoteException) { s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + hostService.invalidateServiceContext(null); } String msg = ""; - if (isAttach) + + if (isAttach) { msg += "Failed to attach volume: " + e.getMessage(); - else + } + else { msg += "Failed to detach volume: " + e.getMessage(); + } + s_logger.error(msg, e); + return new AttachAnswer(msg); } } + private boolean expandVirtualDisk(VirtualMachineMO vmMo, String datastoreVolumePath, long currentSizeInBytes) throws Exception { + long currentSizeInKB = currentSizeInBytes / 1024; + + Pair vDiskPair = vmMo.getDiskDevice(datastoreVolumePath); + + VirtualDisk vDisk = vDiskPair.first(); + + if (vDisk.getCapacityInKB() < currentSizeInKB) { + // IDE virtual disk cannot be re-sized if VM is running + if (vDiskPair.second() != null && vDiskPair.second().contains("ide")) { + throw new Exception("Re-sizing a virtual disk over an IDE controller is not supported in VMware hypervisor. " + + "Please re-try when virtual disk is attached to a VM using a SCSI controller."); + } + + String vmdkAbsFile = resource.getAbsoluteVmdkFile(vDisk); + + if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) { + vmMo.updateAdapterTypeIfRequired(vmdkAbsFile); + } + + vDisk.setCapacityInKB(currentSizeInKB); + + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + + deviceConfigSpec.setDevice(vDisk); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + + VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); + + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); + + if (!vmMo.configureVm(vmConfigSpec)) { + throw new Exception("Failed to configure VM to resize disk. 
vmName: " + vmMo.getName()); + } + + return true; + } + + return false; + } + private static String getSecondaryDatastoreUUID(String storeUrl) { String uuid = null; try{ @@ -1462,7 +2081,7 @@ private static String getSecondaryDatastoreUUID(String storeUrl) { return uuid; } - public synchronized ManagedObjectReference prepareSecondaryDatastoreOnHost(String storeUrl) throws Exception { + private synchronized ManagedObjectReference prepareSecondaryDatastoreOnHost(String storeUrl) throws Exception { String storeName = getSecondaryDatastoreUUID(storeUrl); URI uri = new URI(storeUrl); @@ -1607,7 +2226,7 @@ public Answer createVolume(CreateObjectCommand cmd) { } catch (Exception e) { s_logger.error("Deleting file " + volumeDatastorePath + " due to error: " + e.getMessage()); - VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid.toString(), dcMo, VmwareManager.s_vmwareSearchExcludeFolder.value()); + VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid, dcMo, VmwareManager.s_vmwareSearchExcludeFolder.value()); throw new CloudRuntimeException("Unable to create volume due to: " + e.getMessage()); } } @@ -1702,6 +2321,9 @@ public Answer deleteVolume(DeleteCommand cmd) { DatacenterMO dcMo = new DatacenterMO(context, morDc); vmMo = dcMo.findVm(vmName); } + + List> dynamicTargetsToRemove = null; + if (vmMo != null) { if (s_logger.isInfoEnabled()) { s_logger.info("Destroy root volume and VM itself. vmName " + vmName); @@ -1725,7 +2347,7 @@ public Answer deleteVolume(DeleteCommand cmd) { // don't remove the iSCSI connection(s) until the supported disk(s) is/are removed from the VM // (removeManagedTargetsFromCluster should be called after detachAllDisksExcept and vm.destroy) List virtualDisks = vmMo.getVirtualDisks(); - List managedIqns = getManagedIqnsFromVirtualDisks(virtualDisks); + List managedDatastoreNames = getManagedDatastoreNamesFromVirtualDisks(virtualDisks); List detachedDisks = vmMo.detachAllDisksExcept(vol.getPath(), diskInfo != null ? 
diskInfo.getDiskDeviceBusName() : null); VmwareStorageLayoutHelper.moveVolumeToRootFolder(new DatacenterMO(context, morDc), detachedDisks); @@ -1741,8 +2363,8 @@ public Answer deleteVolume(DeleteCommand cmd) { } // this.hostService.handleDatastoreAndVmdkDetach(iScsiName, storageHost, storagePort); - if (managedIqns != null && !managedIqns.isEmpty()) { - removeManagedTargetsFromCluster(managedIqns); + if (managedDatastoreNames != null && !managedDatastoreNames.isEmpty()) { + removeManagedTargetsFromCluster(managedDatastoreNames); } for (NetworkDetails netDetails : networks) { @@ -1761,7 +2383,8 @@ public Answer deleteVolume(DeleteCommand cmd) { VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); */ - return new Answer(cmd, true, "Success"); + + return new Answer(cmd, true, ""); } if (s_logger.isInfoEnabled()) { @@ -1769,7 +2392,9 @@ public Answer deleteVolume(DeleteCommand cmd) { } } - VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc), VmwareManager.s_vmwareSearchExcludeFolder.value()); + if (!isManaged) { + VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc), VmwareManager.s_vmwareSearchExcludeFolder.value()); + } return new Answer(cmd, true, "Success"); } catch (Throwable e) { @@ -1785,16 +2410,16 @@ public Answer deleteVolume(DeleteCommand cmd) { } public ManagedObjectReference prepareManagedDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String datastoreName, - String iScsiName, String storageHost, int storagePort) throws Exception { + String iScsiName, String storageHost, int storagePort) throws Exception { return getVmfsDatastore(context, hyperHost, datastoreName, storageHost, storagePort, trimIqn(iScsiName), null, null, null, null); } private ManagedObjectReference prepareManagedDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String diskUuid, String iScsiName, - String storageHost, int storagePort, String chapInitiatorUsername, String chapInitiatorSecret, - String chapTargetUsername, String chapTargetSecret) throws Exception { + String storageHost, int storagePort, String chapInitiatorUsername, String chapInitiatorSecret, + String chapTargetUsername, String chapTargetSecret) throws Exception { if (storagePort == DEFAULT_NFS_PORT) { s_logger.info("creating the NFS datastore with the following configuration - storageHost: " + storageHost + ", storagePort: " + storagePort + - ", exportpath: " + iScsiName + "and diskUuid : " + diskUuid); + ", exportpath: " + iScsiName + "and diskUuid : " + diskUuid); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); ClusterMO cluster = new ClusterMO(context, morCluster); List> lstHosts = cluster.getClusterHosts(); @@ -1802,85 +2427,173 @@ private ManagedObjectReference prepareManagedDatastore(VmwareContext context, Vm HostMO host = new HostMO(context, lstHosts.get(0).first()); HostDatastoreSystemMO hostDatastoreSystem = host.getHostDatastoreSystemMO(); - return hostDatastoreSystem.createNfsDatastore(storageHost, storagePort, iScsiName, diskUuid); - } else { - return getVmfsDatastore(context, hyperHost, VmwareResource.getDatastoreName(iScsiName), storageHost, storagePort, - trimIqn(iScsiName), chapInitiatorUsername, chapInitiatorSecret, chapTargetUsername, chapTargetSecret); - } + return hostDatastoreSystem.createNfsDatastore(storageHost, storagePort, iScsiName, diskUuid); + } else { + return getVmfsDatastore(context, hyperHost, 
VmwareResource.getDatastoreName(iScsiName), storageHost, storagePort, + trimIqn(iScsiName), chapInitiatorUsername, chapInitiatorSecret, chapTargetUsername, chapTargetSecret); + } + } + + private List getTargets(String storageIpAddress, int storagePortNumber, String iqn, + String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) { + HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget(); + + target.setAddress(storageIpAddress); + target.setPort(storagePortNumber); + target.setIScsiName(iqn); + + if (StringUtils.isNotBlank(chapName) && StringUtils.isNotBlank(chapSecret)) { + HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties(); + + String strAuthType = "chapRequired"; + + auth.setChapAuthEnabled(true); + auth.setChapInherited(false); + auth.setChapAuthenticationType(strAuthType); + auth.setChapName(chapName); + auth.setChapSecret(chapSecret); + + if (StringUtils.isNotBlank(mutualChapName) && StringUtils.isNotBlank(mutualChapSecret)) { + auth.setMutualChapInherited(false); + auth.setMutualChapAuthenticationType(strAuthType); + auth.setMutualChapName(mutualChapName); + auth.setMutualChapSecret(mutualChapSecret); + } + + target.setAuthenticationProperties(auth); + } + + final List lstTargets = new ArrayList<>(); + + lstTargets.add(target); + + return lstTargets; + } + + private class HostDiscoveryMethod { + private final List hostsUsingDynamicDiscovery; + private final List hostsUsingStaticDiscovery; + + HostDiscoveryMethod(List hostsUsingDynamicDiscovery, List hostsUsingStaticDiscovery) { + this.hostsUsingDynamicDiscovery = hostsUsingDynamicDiscovery; + this.hostsUsingStaticDiscovery = hostsUsingStaticDiscovery; + } + + List getHostsUsingDynamicDiscovery() { + return hostsUsingDynamicDiscovery; + } + + List getHostsUsingStaticDiscovery() { + return hostsUsingStaticDiscovery; + } + } + + private HostDiscoveryMethod getHostDiscoveryMethod(VmwareContext context, String address, + List> hostPairs) throws Exception { + List hosts = new ArrayList<>(); + + for (Pair hostPair : hostPairs) { + HostMO host = new HostMO(context, hostPair.first()); + + hosts.add(host); + } + + return getHostDiscoveryMethod(address, hosts); } - private ManagedObjectReference getVmfsDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, - String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception { - ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); - ClusterMO cluster = new ClusterMO(context, morCluster); - List> lstHosts = cluster.getClusterHosts(); + private HostDiscoveryMethod getHostDiscoveryMethod(String address, List lstHosts) throws Exception { + List hostsUsingDynamicDiscovery = new ArrayList<>(); + List hostsUsingStaticDiscovery = new ArrayList<>(); - HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget(); + for (HostMO host : lstHosts) { + boolean usingDynamicDiscovery = false; - target.setAddress(storageIpAddress); - target.setPort(storagePortNumber); - target.setIScsiName(iqn); + HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO(); - if (StringUtils.isNotBlank(chapName) && StringUtils.isNotBlank(chapSecret)) { - HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties(); + for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) { + if (hba 
instanceof HostInternetScsiHba) { + HostInternetScsiHba hostInternetScsiHba = (HostInternetScsiHba)hba; - String strAuthType = "chapRequired"; + if (hostInternetScsiHba.isIsSoftwareBased()) { + List sendTargets = hostInternetScsiHba.getConfiguredSendTarget(); - auth.setChapAuthEnabled(true); - auth.setChapInherited(false); - auth.setChapAuthenticationType(strAuthType); - auth.setChapName(chapName); - auth.setChapSecret(chapSecret); + if (sendTargets != null) { + for (HostInternetScsiHbaSendTarget sendTarget : sendTargets) { + String sendTargetAddress = sendTarget.getAddress(); - if (StringUtils.isNotBlank(mutualChapName) && StringUtils.isNotBlank(mutualChapSecret)) { - auth.setMutualChapInherited(false); - auth.setMutualChapAuthenticationType(strAuthType); - auth.setMutualChapName(mutualChapName); - auth.setMutualChapSecret(mutualChapSecret); + if (sendTargetAddress.contains(address)) { + usingDynamicDiscovery = true; + } + } + } + } + } } - target.setAuthenticationProperties(auth); + if (usingDynamicDiscovery) { + hostsUsingDynamicDiscovery.add(host); + } + else { + hostsUsingStaticDiscovery.add(host); + } } - final List lstTargets = new ArrayList(); + return new HostDiscoveryMethod(hostsUsingDynamicDiscovery, hostsUsingStaticDiscovery); + } - lstTargets.add(target); + private ManagedObjectReference getVmfsDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception { + ManagedObjectReference morDs; - addRemoveInternetScsiTargetsToAllHosts(context, true, lstTargets, lstHosts); + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + ClusterMO cluster = new ClusterMO(context, morCluster); + List> lstHosts = cluster.getClusterHosts(); - rescanAllHosts(context, lstHosts); + Pair firstHost = lstHosts.get(0); + HostMO firstHostMO = new HostMO(context, firstHost.first()); + HostDatastoreSystemMO firstHostDatastoreSystemMO = firstHostMO.getHostDatastoreSystemMO(); - HostMO host = new HostMO(context, lstHosts.get(0).first()); - HostDatastoreSystemMO hostDatastoreSystem = host.getHostDatastoreSystemMO(); + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(context, storageIpAddress, lstHosts); + List hostsUsingStaticDiscovery = hostDiscoveryMethod.getHostsUsingStaticDiscovery(); - ManagedObjectReference morDs = hostDatastoreSystem.findDatastoreByName(datastoreName); + if (hostsUsingStaticDiscovery != null && hostsUsingStaticDiscovery.size() > 0) { + List lstTargets = getTargets(storageIpAddress, storagePortNumber, iqn, + chapName, chapSecret, mutualChapName, mutualChapSecret); - if (morDs != null) { - return morDs; + addRemoveInternetScsiTargetsToAllHosts(true, lstTargets, hostsUsingStaticDiscovery); } - rescanAllHosts(context, lstHosts); + rescanAllHosts(context, lstHosts, true, false); - HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO(); - List lstHostScsiDisks = hostDatastoreSystem.queryAvailableDisksForVmfs(); + HostStorageSystemMO firstHostStorageSystem = firstHostMO.getHostStorageSystemMO(); + List lstHostScsiDisks = firstHostDatastoreSystemMO.queryAvailableDisksForVmfs(); - HostScsiDisk hostScsiDisk = getHostScsiDisk(hostStorageSystem.getStorageDeviceInfo().getScsiTopology(), lstHostScsiDisks, iqn); + HostScsiDisk hostScsiDisk = getHostScsiDisk(firstHostStorageSystem.getStorageDeviceInfo().getScsiTopology(), lstHostScsiDisks, iqn); if (hostScsiDisk == 
null) { - // check to see if the datastore actually does exist already - morDs = hostDatastoreSystem.findDatastoreByName(datastoreName); + rescanAllHosts(context, lstHosts, false, true); + + morDs = firstHostDatastoreSystemMO.findDatastoreByName(datastoreName); if (morDs != null) { + waitForAllHostsToSeeDatastore(lstHosts, new DatastoreMO(context, morDs)); + + mountVmfsDatastore(new DatastoreMO(context, morDs), lstHosts); + + expandDatastore(firstHostDatastoreSystemMO, new DatastoreMO(context, morDs)); + return morDs; } throw new Exception("A relevant SCSI disk could not be located to use to create a datastore."); } - morDs = hostDatastoreSystem.createVmfsDatastore(datastoreName, hostScsiDisk); + morDs = firstHostDatastoreSystemMO.createVmfsDatastore(datastoreName, hostScsiDisk); if (morDs != null) { - rescanAllHosts(context, lstHosts); + waitForAllHostsToMountDatastore(lstHosts, new DatastoreMO(context, morDs)); + + expandDatastore(firstHostDatastoreSystemMO, new DatastoreMO(context, morDs)); return morDs; } @@ -1888,6 +2601,89 @@ private ManagedObjectReference getVmfsDatastore(VmwareContext context, VmwareHyp throw new Exception("Unable to create a datastore"); } + private void waitForAllHostsToSeeDatastore(List> lstHosts, DatastoreMO dsMO) throws Exception { + long secondsToWait = 120; + long endWaitTime = System.currentTimeMillis() + secondsToWait * 1000; + + boolean isConditionMet = false; + + while (System.currentTimeMillis() < endWaitTime && !isConditionMet) { + Thread.sleep(5000); + + isConditionMet = verifyAllHostsSeeDatastore(lstHosts, dsMO); + } + + if (!isConditionMet) { + throw new CloudRuntimeException("Not all hosts mounted the datastore"); + } + } + + private boolean verifyAllHostsSeeDatastore(List> lstHosts, DatastoreMO dsMO) throws Exception { + int numHostsChecked = 0; + + for (Pair host: lstHosts) { + ManagedObjectReference morHostToMatch = host.first(); + HostMO hostToMatchMO = new HostMO(dsMO.getContext(), morHostToMatch); + + List datastoreHostMounts = dsMO.getHostMounts(); + + for (DatastoreHostMount datastoreHostMount : datastoreHostMounts) { + ManagedObjectReference morHost = datastoreHostMount.getKey(); + HostMO hostMO = new HostMO(dsMO.getContext(), morHost); + + if (hostMO.getHostName().equals(hostToMatchMO.getHostName())) { + numHostsChecked++; + } + } + } + + return lstHosts.size() == numHostsChecked; + } + + private void waitForAllHostsToMountDatastore(List> lstHosts, DatastoreMO dsMO) throws Exception { + long secondsToWait = 120; + long endWaitTime = System.currentTimeMillis() + secondsToWait * 1000; + + boolean isConditionMet = false; + + while (System.currentTimeMillis() < endWaitTime && !isConditionMet) { + Thread.sleep(5000); + + isConditionMet = verifyAllHostsMountedDatastore(lstHosts, dsMO); + } + + if (!isConditionMet) { + throw new CloudRuntimeException("Not all hosts mounted the datastore"); + } + } + + private boolean verifyAllHostsMountedDatastore(List> lstHosts, DatastoreMO dsMO) throws Exception { + int numHostsChecked = 0; + + for (Pair host: lstHosts) { + ManagedObjectReference morHostToMatch = host.first(); + HostMO hostToMatchMO = new HostMO(dsMO.getContext(), morHostToMatch); + + List datastoreHostMounts = dsMO.getHostMounts(); + + for (DatastoreHostMount datastoreHostMount : datastoreHostMounts) { + ManagedObjectReference morHost = datastoreHostMount.getKey(); + HostMO hostMO = new HostMO(dsMO.getContext(), morHost); + + if (hostMO.getHostName().equals(hostToMatchMO.getHostName())) { + if (datastoreHostMount.getMountInfo().isMounted() && 
datastoreHostMount.getMountInfo().isAccessible()) { + numHostsChecked++; + } + else { + return false; + } + } + } + } + + return lstHosts.size() == numHostsChecked; + } + // the purpose of this method is to find the HostScsiDisk in the passed-in array that exists (if any) because // we added the static iqn to an iSCSI HBA private static HostScsiDisk getHostScsiDisk(HostScsiTopology hst, List lstHostScsiDisks, String iqn) { @@ -1914,27 +2710,130 @@ private static HostScsiDisk getHostScsiDisk(HostScsiTopology hst, List datastoreHostMounts = dsMO.getHostMounts(); - VmwareContext context = hostService.getServiceContext(null); + for (DatastoreHostMount datastoreHostMount : datastoreHostMounts) { + ManagedObjectReference morHost = datastoreHostMount.getKey(); + HostMO hostMO = new HostMO(dsMO.getContext(), morHost); + + if (hostMO.getHostName().equals(hostToMatchMO.getHostName())) { + return datastoreHostMount.getMountInfo().isMounted(); + } + } + + throw new CloudRuntimeException("Unable to locate the applicable host"); + } + + private String getDatastoreUuid(DatastoreMO dsMO, HostMO hostToMatchMO) throws Exception { + List datastoreHostMounts = dsMO.getHostMounts(); + + for (DatastoreHostMount datastoreHostMount : datastoreHostMounts) { + ManagedObjectReference morHost = datastoreHostMount.getKey(); + HostMO hostMO = new HostMO(dsMO.getContext(), morHost); + + if (hostMO.getHostName().equals(hostToMatchMO.getHostName())) { + String path = datastoreHostMount.getMountInfo().getPath(); + + String searchStr = "/vmfs/volumes/"; + int index = path.indexOf(searchStr); + + if (index == -1) { + throw new CloudRuntimeException("Unable to find the following search string: " + searchStr); + } + + return path.substring(index + searchStr.length()); + } + } + + throw new CloudRuntimeException("Unable to locate the UUID of the datastore"); + } + + private void mountVmfsDatastore(DatastoreMO dsMO, List> hosts) throws Exception { + for (Pair host : hosts) { + HostMO hostMO = new HostMO(dsMO.getContext(), host.first()); + + if (!isDatastoreMounted(dsMO, hostMO)) { + HostStorageSystemMO hostStorageSystemMO = hostMO.getHostStorageSystemMO(); + + try { + hostStorageSystemMO.mountVmfsVolume(getDatastoreUuid(dsMO, hostMO)); + } + catch (InvalidStateFaultMsg ex) { + List> currentHosts = new ArrayList<>(1); + + currentHosts.add(host); + + waitForAllHostsToMountDatastore(currentHosts, dsMO); + } + } + } + } + + private void unmountVmfsDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String datastoreName, + List> hosts) throws Exception { + ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, datastoreName); + DatastoreMO dsMO = new DatastoreMO(context, morDs); + + for (Pair host : hosts) { + HostMO hostMO = new HostMO(context, host.first()); + + HostStorageSystemMO hostStorageSystemMO = hostMO.getHostStorageSystemMO(); + + hostStorageSystemMO.unmountVmfsVolume(getDatastoreUuid(dsMO, hostMO)); + } + } + + private List getTargets(List> targets) { + List iScsiTargets = new ArrayList<>(); + + for (Map target : targets) { + HostInternetScsiHbaStaticTarget iScsiTarget = new HostInternetScsiHbaStaticTarget(); + + iScsiTarget.setAddress(target.get(ModifyTargetsCommand.STORAGE_HOST)); + iScsiTarget.setPort(Integer.parseInt(target.get(ModifyTargetsCommand.STORAGE_PORT))); + iScsiTarget.setIScsiName(trimIqn(target.get(ModifyTargetsCommand.IQN))); + + iScsiTargets.add(iScsiTarget); + } + + return iScsiTargets; + } + + private void removeVmfsDatastore(Command cmd, 
VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn) throws Exception { + VmwareContext context = hostService.getServiceContext(cmd); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); ClusterMO cluster = new ClusterMO(context, morCluster); List> lstHosts = cluster.getClusterHosts(); - HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget(); + removeVmfsDatastore(cmd, hyperHost, datastoreName, storageIpAddress, storagePortNumber, iqn, lstHosts); + } - target.setAddress(storageIpAddress); - target.setPort(storagePortNumber); - target.setIScsiName(iqn); + private void removeVmfsDatastore(Command cmd, VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn, List> lstHosts) throws Exception { + VmwareContext context = hostService.getServiceContext(cmd); - final List lstTargets = new ArrayList(); + unmountVmfsDatastore(context, hyperHost, datastoreName, lstHosts); - lstTargets.add(target); + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(context, storageIpAddress, lstHosts); + List hostsUsingStaticDiscovery = hostDiscoveryMethod.getHostsUsingStaticDiscovery(); + + if (hostsUsingStaticDiscovery != null && hostsUsingStaticDiscovery.size() > 0) { + HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget(); + + target.setAddress(storageIpAddress); + target.setPort(storagePortNumber); + target.setIScsiName(iqn); + + final List lstTargets = new ArrayList<>(); + + lstTargets.add(target); - addRemoveInternetScsiTargetsToAllHosts(context, false, lstTargets, lstHosts); + addRemoveInternetScsiTargetsToAllHosts(false, lstTargets, hostsUsingStaticDiscovery); - rescanAllHosts(context, lstHosts); + rescanAllHosts(hostsUsingStaticDiscovery, true, false); + } } private void createVmdk(Command cmd, DatastoreMO dsMo, String vmdkDatastorePath, Long volumeSize) throws Exception { @@ -1960,59 +2859,105 @@ private static int getMBsFromBytes(long bytes) { return (int)(bytes / (1024L * 1024L)); } - public void handleTargetsForHost(boolean add, List> targets, HostMO host) throws Exception { - List lstTargets = new ArrayList(); + public void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove, boolean isRemoveAsync, + List> targets, List lstHosts) throws Exception { + ExecutorService executorService = Executors.newFixedThreadPool(lstHosts.size()); + + for (HostMO host : lstHosts) { + List hosts = new ArrayList<>(); - for (Map mapTarget : targets) { - HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget(); + hosts.add(host); - String targetAddress = mapTarget.get(ModifyTargetsCommand.STORAGE_HOST); - Integer targetPort = Integer.parseInt(mapTarget.get(ModifyTargetsCommand.STORAGE_PORT)); - String iScsiName = trimIqn(mapTarget.get(ModifyTargetsCommand.IQN)); + List> dynamicTargetsForHost = new ArrayList<>(); + List> staticTargetsForHost = new ArrayList<>(); - target.setAddress(targetAddress); - target.setPort(targetPort); - target.setIScsiName(iScsiName); + for (Map target : targets) { + String storageAddress = target.get(ModifyTargetsCommand.STORAGE_HOST); - String chapName = mapTarget.get(ModifyTargetsCommand.CHAP_NAME); - String chapSecret = mapTarget.get(ModifyTargetsCommand.CHAP_SECRET); + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(storageAddress, hosts); + List hostsUsingDynamicDiscovery = 
hostDiscoveryMethod.getHostsUsingDynamicDiscovery(); - if (StringUtils.isNotBlank(chapName) && StringUtils.isNotBlank(chapSecret)) { - HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties(); + if (hostsUsingDynamicDiscovery != null && hostsUsingDynamicDiscovery.size() > 0) { + dynamicTargetsForHost.add(target); + } + else { + staticTargetsForHost.add(target); + } + } - String strAuthType = "chapRequired"; + if (add) { + executorService.submit(new Thread(() -> { + try { + boolean rescan = false; - auth.setChapAuthEnabled(true); - auth.setChapInherited(false); - auth.setChapAuthenticationType(strAuthType); - auth.setChapName(chapName); - auth.setChapSecret(chapSecret); + if (staticTargetsForHost.size() > 0) { + addRemoveInternetScsiTargetsToAllHosts(true, getTargets(staticTargetsForHost), hosts); - String mutualChapName = mapTarget.get(ModifyTargetsCommand.MUTUAL_CHAP_NAME); - String mutualChapSecret = mapTarget.get(ModifyTargetsCommand.MUTUAL_CHAP_SECRET); + rescan = true; + } - if (StringUtils.isNotBlank(mutualChapName) && StringUtils.isNotBlank(mutualChapSecret)) { - auth.setMutualChapInherited(false); - auth.setMutualChapAuthenticationType(strAuthType); - auth.setMutualChapName(mutualChapName); - auth.setMutualChapSecret(mutualChapSecret); - } + if (dynamicTargetsForHost.size() > 0) { + rescan = true; + } - target.setAuthenticationProperties(auth); + if (rescan) { + rescanAllHosts(hosts, true, false); + } + } + catch (Exception ex) { + s_logger.warn(ex.getMessage()); + } + })); } + else { + List targetsToRemove = new ArrayList<>(); - lstTargets.add(target); - } + if (staticTargetsForHost.size() > 0 && + (ModifyTargetsCommand.TargetTypeToRemove.STATIC.equals(targetTypeToRemove) || ModifyTargetsCommand.TargetTypeToRemove.BOTH.equals(targetTypeToRemove))) { + targetsToRemove.addAll(getTargets(staticTargetsForHost)); + } - List hosts = new ArrayList<>(); + if (dynamicTargetsForHost.size() > 0 && + (ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC.equals(targetTypeToRemove) || ModifyTargetsCommand.TargetTypeToRemove.BOTH.equals(targetTypeToRemove))) { + targetsToRemove.addAll(getTargets(dynamicTargetsForHost)); + } + + if (targetsToRemove.size() > 0) { + if (isRemoveAsync) { + new Thread(() -> { + try { + addRemoveInternetScsiTargetsToAllHosts(false, targetsToRemove, hosts); + + rescanAllHosts(hosts, true, false); + } catch (Exception ex) { + s_logger.warn(ex.getMessage()); + } + }).start(); + } else { + executorService.submit(new Thread(() -> { + try { + addRemoveInternetScsiTargetsToAllHosts(false, targetsToRemove, hosts); + + rescanAllHosts(hosts, true, false); + } + catch (Exception ex) { + s_logger.warn(ex.getMessage()); + } + })); + } + } + } + } - hosts.add(host); + executorService.shutdown(); - addRemoveInternetScsiTargetsToAllHosts(add, lstTargets, hosts); + if (!executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES)) { + throw new Exception("The system timed out before completing the task 'handleTargets'."); + } } private void addRemoveInternetScsiTargetsToAllHosts(VmwareContext context, final boolean add, final List targets, - List> hostPairs) throws Exception { + List> hostPairs) throws Exception { List hosts = new ArrayList<>(); for (Pair hostPair : hostPairs) { @@ -2024,11 +2969,11 @@ private void addRemoveInternetScsiTargetsToAllHosts(VmwareContext context, final addRemoveInternetScsiTargetsToAllHosts(add, targets, hosts); } - private void addRemoveInternetScsiTargetsToAllHosts(final boolean add, final List targets, - 
List hosts) throws Exception { + private void addRemoveInternetScsiTargetsToAllHosts(boolean add, List targets, + List hosts) throws Exception { ExecutorService executorService = Executors.newFixedThreadPool(hosts.size()); - final List exceptions = new ArrayList(); + final List exceptions = new ArrayList<>(); for (HostMO host : hosts) { HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO(); @@ -2036,34 +2981,26 @@ private void addRemoveInternetScsiTargetsToAllHosts(final boolean add, final Lis boolean iScsiHbaConfigured = false; for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) { - if (hba instanceof HostInternetScsiHba) { - // just finding an instance of HostInternetScsiHba means that we have found at least one configured iSCSI HBA - // at least one iSCSI HBA must be configured before a CloudStack user can use this host for iSCSI storage + if (hba instanceof HostInternetScsiHba && ((HostInternetScsiHba)hba).isIsSoftwareBased()) { iScsiHbaConfigured = true; final String iScsiHbaDevice = hba.getDevice(); final HostStorageSystemMO hss = hostStorageSystem; - executorService.submit(new Thread() { - @Override - public void run() { - try { - if (add) { - hss.addInternetScsiStaticTargets(iScsiHbaDevice, targets); - } else { - hss.removeInternetScsiStaticTargets(iScsiHbaDevice, targets); - } - - hss.rescanHba(iScsiHbaDevice); - hss.rescanVmfs(); - } catch (Exception ex) { - synchronized (exceptions) { - exceptions.add(ex); - } + executorService.submit(new Thread(() -> { + try { + if (add) { + hss.addInternetScsiStaticTargets(iScsiHbaDevice, targets); + } else { + hss.removeInternetScsiStaticTargets(iScsiHbaDevice, targets); + } + } catch (Exception ex) { + synchronized (exceptions) { + exceptions.add(ex); } } - }); + })); } } @@ -2075,7 +3012,7 @@ public void run() { executorService.shutdown(); if (!executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES)) { - throw new Exception("The system timed out before completing the task 'rescanAllHosts'."); + throw new Exception("The system timed out before completing the task 'addRemoveInternetScsiTargetsToAllHosts'."); } if (exceptions.size() > 0) { @@ -2083,40 +3020,56 @@ public void run() { } } - private void rescanAllHosts(VmwareContext context, List> lstHosts) throws Exception { + private void rescanAllHosts(VmwareContext context, List> lstHostPairs, boolean rescanHba, boolean rescanVmfs) throws Exception { + List hosts = new ArrayList<>(lstHostPairs.size()); + + for (Pair hostPair : lstHostPairs) { + HostMO host = new HostMO(context, hostPair.first()); + + hosts.add(host); + } + + rescanAllHosts(hosts, rescanHba, rescanVmfs); + } + + private void rescanAllHosts(List lstHosts, boolean rescanHba, boolean rescanVmfs) throws Exception { + if (!rescanHba && !rescanVmfs) { + // nothing to do + return; + } + ExecutorService executorService = Executors.newFixedThreadPool(lstHosts.size()); - final List exceptions = new ArrayList(); + final List exceptions = new ArrayList<>(); - for (Pair hostPair : lstHosts) { - HostMO host = new HostMO(context, hostPair.first()); + for (HostMO host : lstHosts) { HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO(); boolean iScsiHbaConfigured = false; for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) { - if (hba instanceof HostInternetScsiHba) { - // just finding an instance of HostInternetScsiHba means that we have found at least one configured iSCSI HBA - // at least one iSCSI HBA must be 
configured before a CloudStack user can use this host for iSCSI storage + if (hba instanceof HostInternetScsiHba && ((HostInternetScsiHba)hba).isIsSoftwareBased()) { iScsiHbaConfigured = true; final String iScsiHbaDevice = hba.getDevice(); final HostStorageSystemMO hss = hostStorageSystem; - executorService.submit(new Thread() { - @Override - public void run() { - try { + executorService.submit(new Thread(() -> { + try { + if (rescanHba) { hss.rescanHba(iScsiHbaDevice); + } + + if (rescanVmfs) { hss.rescanVmfs(); - } catch (Exception ex) { - synchronized (exceptions) { - exceptions.add(ex); - } + } + } catch (Exception ex) { + synchronized (exceptions) { + exceptions.add(ex); } } - }); + })); } } @@ -2140,7 +3093,7 @@ private static String trimIqn(String iqn) { String[] tmp = iqn.split("/"); if (tmp.length != 3) { - String msg = "Wrong format for iScsi path: " + iqn + ". It should be formatted as '/targetIQN/LUN'."; + String msg = "Wrong format for iSCSI path: " + iqn + ". It should be formatted as '/targetIQN/LUN'."; s_logger.warn(msg); @@ -2151,8 +3104,8 @@ private static String trimIqn(String iqn) { } public ManagedObjectReference prepareManagedStorage(VmwareContext context, VmwareHypervisorHost hyperHost, String diskUuid, String iScsiName, - String storageHost, int storagePort, String volumeName, String chapInitiatorUsername, String chapInitiatorSecret, - String chapTargetUsername, String chapTargetSecret, long size, Command cmd) throws Exception { + String storageHost, int storagePort, String volumeName, String chapInitiatorUsername, String chapInitiatorSecret, + String chapTargetUsername, String chapTargetSecret, long size, Command cmd) throws Exception { ManagedObjectReference morDs = prepareManagedDatastore(context, hyperHost, diskUuid, iScsiName, storageHost, storagePort, chapInitiatorUsername, chapInitiatorSecret, chapTargetUsername, chapTargetSecret); @@ -2168,42 +3121,62 @@ public ManagedObjectReference prepareManagedStorage(VmwareContext context, Vmwar return morDs; } - public void handleDatastoreAndVmdkDetach(String datastoreName, String iqn, String storageHost, int storagePort) throws Exception { + public void handleDatastoreAndVmdkDetach(Command cmd, String datastoreName, String iqn, String storageHost, int storagePort) throws Exception { VmwareContext context = hostService.getServiceContext(null); VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); - removeVmfsDatastore(hyperHost, datastoreName, storageHost, storagePort, trimIqn(iqn)); + removeVmfsDatastore(cmd, hyperHost, datastoreName, storageHost, storagePort, trimIqn(iqn)); } - private void handleDatastoreAndVmdkDetachManaged(String diskUuid, String iqn, String storageHost, int storagePort) throws Exception { + private void handleDatastoreAndVmdkDetachManaged(Command cmd, String diskUuid, String iqn, String storageHost, int storagePort) throws Exception { if (storagePort == DEFAULT_NFS_PORT) { VmwareContext context = hostService.getServiceContext(null); VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); // for managed NFS datastore hyperHost.unmountDatastore(diskUuid); } else { - handleDatastoreAndVmdkDetach(VmwareResource.getDatastoreName(iqn), iqn, storageHost, storagePort); + handleDatastoreAndVmdkDetach(cmd, VmwareResource.getDatastoreName(iqn), iqn, storageHost, storagePort); + } + } + + private class ManagedTarget { + private final String storageAddress; + private final int storagePort; + private final String iqn; + + ManagedTarget(String storageAddress, int 
storagePort, String iqn) { + this.storageAddress = storageAddress; + this.storagePort = storagePort; + this.iqn = iqn; + } + + public String toString() { + return storageAddress + storagePort + iqn; } } - private void removeManagedTargetsFromCluster(List iqns) throws Exception { - List lstManagedTargets = new ArrayList(); + private void removeManagedTargetsFromCluster(List managedDatastoreNames) throws Exception { + List lstManagedTargets = new ArrayList<>(); VmwareContext context = hostService.getServiceContext(null); VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); ClusterMO cluster = new ClusterMO(context, morCluster); List> lstHosts = cluster.getClusterHosts(); - HostMO host = new HostMO(context, lstHosts.get(0).first()); - HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO(); + HostMO hostMO = new HostMO(context, lstHosts.get(0).first()); + HostStorageSystemMO hostStorageSystem = hostMO.getHostStorageSystemMO(); + + for (String managedDatastoreName : managedDatastoreNames) { + unmountVmfsDatastore(context, hyperHost, managedDatastoreName, lstHosts); + } for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) { - if (hba instanceof HostInternetScsiHba) { + if (hba instanceof HostInternetScsiHba && ((HostInternetScsiHba)hba).isIsSoftwareBased()) { List lstTargets = ((HostInternetScsiHba)hba).getConfiguredStaticTarget(); if (lstTargets != null) { for (HostInternetScsiHbaStaticTarget target : lstTargets) { - if (iqns.contains(target.getIScsiName())) { + if (managedDatastoreNames.contains(VmwareResource.createDatastoreNameFromIqn(target.getIScsiName()))) { lstManagedTargets.add(target); } } @@ -2211,13 +3184,49 @@ private void removeManagedTargetsFromCluster(List iqns) throws Exception } } - addRemoveInternetScsiTargetsToAllHosts(context, false, lstManagedTargets, lstHosts); + ExecutorService executorService = Executors.newFixedThreadPool(lstHosts.size()); + + for (Pair host : lstHosts) { + List> hosts = new ArrayList<>(); + + hosts.add(host); + + List staticTargetsForHost = new ArrayList<>(); + + for (HostInternetScsiHbaStaticTarget iScsiManagedTarget : lstManagedTargets) { + String storageAddress = iScsiManagedTarget.getAddress(); + + HostDiscoveryMethod hostDiscoveryMethod = getHostDiscoveryMethod(context, storageAddress, hosts); + List hostsUsingStaticDiscovery = hostDiscoveryMethod.getHostsUsingStaticDiscovery(); + + if (hostsUsingStaticDiscovery != null && hostsUsingStaticDiscovery.size() > 0) { + staticTargetsForHost.add(iScsiManagedTarget); + } + } + + if (staticTargetsForHost.size() > 0) { + executorService.submit(new Thread(() -> { + try { + addRemoveInternetScsiTargetsToAllHosts(context, false, staticTargetsForHost, hosts); + + rescanAllHosts(context, hosts, true, false); + } + catch (Exception ex) { + s_logger.warn(ex.getMessage()); + } + })); + } + } + + executorService.shutdown(); - rescanAllHosts(context, lstHosts); + if (!executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES)) { + throw new Exception("The system timed out before completing the task 'removeManagedTargetsFromCluster'."); + } } - private List getManagedIqnsFromVirtualDisks(List virtualDisks) { - List managedIqns = new ArrayList(); + private List getManagedDatastoreNamesFromVirtualDisks(List virtualDisks) { + List managedDatastoreNames = new ArrayList<>(); if (virtualDisks != null) { for (VirtualDisk virtualDisk : virtualDisks) { @@ -2239,7 +3248,7 @@ 
private void removeManagedTargetsFromCluster(List iqns) throws Exception path = path.substring(0, index); if (path.startsWith("iqn.")) { - managedIqns.add(path); + managedDatastoreNames.add("-" + path + "-0"); } } } @@ -2247,15 +3256,15 @@ private void removeManagedTargetsFromCluster(List iqns) throws Exception } } - return managedIqns; + return managedDatastoreNames; } private Long restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, String secStorageUrl, String secStorageDir, - String backupName, long wait, Integer nfsVersion) throws Exception { + String backupName, long wait, Integer nfsVersion) throws Exception { String secondaryMountPoint = mountService.getMountPoint(secStorageUrl, null); - String srcOVAFileName = null; - String srcOVFFileName = null; + String srcOVAFileName; + String srcOVFFileName; srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + backupName + "." + ImageFormat.OVA.getFileExtension(); srcOVFFileName = secondaryMountPoint + "/" + secStorageDir + "/" + backupName + ".ovf"; @@ -2335,7 +3344,7 @@ public Answer createVolumeFromSnapshot(CopyCommand cmd) { int index = backedUpSnapshotUuid.lastIndexOf(File.separator); String backupPath = backedUpSnapshotUuid.substring(0, index); backedUpSnapshotUuid = backedUpSnapshotUuid.substring(index + 1); - String details = null; + String details; String newVolumeName = VmwareHelper.getVCenterSafeUuid(); VmwareContext context = hostService.getServiceContext(cmd); @@ -2405,36 +3414,16 @@ private static String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, S return templateUuid; } - private String getControllerFromConfigurationSetting() throws Exception { - String diskController = null; - VmwareContext context = null; - try { - context = hostService.getServiceContext(null); - VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - diskController = mgr.getDataDiskController(); - } catch (Throwable e) { - if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); - hostService.invalidateServiceContext(context); - } - - String details = "Failed to connect to vCenter due to " + VmwareHelper.getExceptionMessage(e); - s_logger.error(details, e); - } - - return diskController; - } - private String getLegacyVmDataDiskController() throws Exception { return DiskControllerType.lsilogic.toString(); } - public void setNfsVersion(Integer nfsVersion){ + void setNfsVersion(Integer nfsVersion){ this._nfsVersion = nfsVersion; s_logger.debug("VmwareProcessor instance now using NFS version: " + nfsVersion); } - public void setFullCloneFlag(boolean value){ + void setFullCloneFlag(boolean value){ this._fullCloneFlag = value; s_logger.debug("VmwareProcessor instance - create full clone = " + (value ? 
"TRUE" : "FALSE")); } diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 97d6118d335..4b7080aa5ab 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -2459,12 +2459,12 @@ private SR introduceAndPlugIscsiSr(Connection conn, String pooluuid, String srNa return sr; } - private String resignatureIscsiSr(Connection conn, Host host, Map deviceConfig, String srNameLabel, Map smConfig) throws XmlRpcException, XenAPIException { - + private String resignatureIscsiSr(Connection conn, Host host, Map deviceConfig, String srNameLabel, Map smConfig) + throws XmlRpcException, XenAPIException { String pooluuid; + try { - SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), - "user", true, smConfig); + SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig); // The successful outcome of SR.create (right above) is to throw an exception of type XenAPIException (with expected // toString() text) after resigning the metadata (we indicated to perform a resign by passing in SRType.RELVMOISCSI.toString()). @@ -2492,6 +2492,7 @@ private String resignatureIscsiSr(Connection conn, Host host, Map iqnToPath = new HashMap(); + // if a VDI is created, record its UUID and its type (ex. VHD) to send back to the CS MS + final Map> iqnToData = new HashMap<>(); + try { final Set vms = VM.getByNameLabel(conn, vmName); if (vms != null) { @@ -112,9 +114,14 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri final VDI newVdi = citrixResourceBase.prepareManagedDisk(conn, disk, vmSpec.getId(), vmSpec.getName()); if (newVdi != null) { + final Map data = new HashMap<>(); + final String path = newVdi.getUuid(conn); - iqnToPath.put(disk.getDetails().get(DiskTO.IQN), path); + data.put(StartAnswer.PATH, path); + data.put(StartAnswer.IMAGE_FORMAT, Storage.ImageFormat.VHD.toString()); + + iqnToData.put(disk.getDetails().get(DiskTO.IQN), data); } citrixResourceBase.createVbd(conn, disk, vmName, vm, vmSpec.getBootloader(), newVdi); @@ -201,7 +208,7 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri final StartAnswer startAnswer = new StartAnswer(command); - startAnswer.setIqnToPath(iqnToPath); + startAnswer.setIqnToData(iqnToData); return startAnswer; } catch (final Exception e) { @@ -210,7 +217,7 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri final StartAnswer startAnswer = new StartAnswer(command, msg); - startAnswer.setIqnToPath(iqnToPath); + startAnswer.setIqnToData(iqnToData); return startAnswer; } finally { diff --git a/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java index 2409b6e2e69..106fd9ea3fd 100644 --- a/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java +++ b/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java @@ -58,7 +58,10 @@ import com.cloud.exception.OperationTimedoutException; import 
com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.StoragePool; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.dao.VolumeDao; @@ -84,6 +87,8 @@ private VolumeDetailsDao volumeDetailsDao; @Inject VMInstanceDao instanceDao; + @Inject + SnapshotDao snapshotDao; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -135,6 +140,21 @@ private String getBasicIqn(long volumeId) { return volumeDetail.getValue(); } + private void verifyNoSnapshotsOnManagedStorageVolumes(Map volumeToPool) { + for (Map.Entry entry : volumeToPool.entrySet()) { + VolumeInfo volumeInfo = entry.getKey(); + StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId()); + + if (storagePool.isManaged()) { + List snapshots = getNonDestroyedSnapshots(volumeInfo.getId()); + + if (snapshots != null && snapshots.size() > 0) { + throw new CloudRuntimeException("Cannot perform this action on a volume with one or more snapshots"); + } + } + } + } + /** * Tell the underlying storage plug-in to create a new volume, put it in the VAG of the destination cluster, and * send a command to the destination cluster to create an SR and to attach to the SR from all hosts in the cluster. @@ -185,6 +205,24 @@ private String handleManagedVolumePreMigration(VolumeInfo volumeInfo, StoragePoo return iqn; } + private List getNonDestroyedSnapshots(long csVolumeId) { + List lstSnapshots = snapshotDao.listByVolumeId(csVolumeId); + + if (lstSnapshots == null) { + lstSnapshots = new ArrayList<>(); + } + + List lstSnapshots2 = new ArrayList<>(); + + for (SnapshotVO snapshot : lstSnapshots) { + if (!Snapshot.State.Destroyed.equals(snapshot.getState())) { + lstSnapshots2.add(snapshot); + } + } + + return lstSnapshots2; + } + private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHost, VolumeObjectTO volumeTO) { final Map details = new HashMap<>(); @@ -273,6 +311,8 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine // Initiate migration of a virtual machine with its volumes. 
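For readers following the XenServerStorageMotionStrategy changes: the verifyNoSnapshotsOnManagedStorageVolumes(volumeToPool) call added just below is the guard whose helpers appear earlier in this hunk. A condensed sketch of its effect, with the generic type parameters restored by inference (the archived diff strips angle brackets); this is illustrative, not the PR's literal code:

    // Refuse to migrate any volume that lives on managed storage and still has
    // a snapshot in any state other than Destroyed.
    for (Map.Entry<VolumeInfo, StoragePool> entry : volumeToPool.entrySet()) {
        VolumeInfo volumeInfo = entry.getKey();
        StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId());

        if (storagePool.isManaged()) {
            List<SnapshotVO> snapshots = snapshotDao.listByVolumeId(volumeInfo.getId());

            boolean hasLiveSnapshot = snapshots != null &&
                    snapshots.stream().anyMatch(s -> !Snapshot.State.Destroyed.equals(s.getState()));

            if (hasLiveSnapshot) {
                throw new CloudRuntimeException("Cannot perform this action on a volume with one or more snapshots");
            }
        }
    }

Failing fast here, before any volume is copied, avoids leaving the VM half-migrated when the storage plug-in would reject the volume anyway.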
try { + verifyNoSnapshotsOnManagedStorageVolumes(volumeToPool); + List> volumeToStorageUuid = new ArrayList<>(); for (Map.Entry entry : volumeToPool.entrySet()) { diff --git a/plugins/pom.xml b/plugins/pom.xml index f57fbdc126c..680f670ff85 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -187,6 +187,17 @@ network-elements/cisco-vnmc + + vmware-sioc + + + noredist + + + + api/vmware-sioc + + mysqlha diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index bd93abb8d68..e7f96ca4a79 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -36,6 +36,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.Snapshot.State; @@ -81,6 +82,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @@ -118,6 +120,7 @@ mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); return mapCapabilities; } @@ -332,12 +335,11 @@ private long getDefaultBurstIops(long storagePoolId, long maxIops) { float fClusterDefaultBurstIopsPercentOfMaxIops = Float.parseFloat(clusterDefaultBurstIopsPercentOfMaxIops); - return (long)(maxIops * fClusterDefaultBurstIopsPercentOfMaxIops); + return Math.min((long)(maxIops * fClusterDefaultBurstIopsPercentOfMaxIops), SolidFireUtil.MAX_IOPS_PER_VOLUME); } - private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, DataObject dataObject, long sfAccountId) { - long storagePoolId = dataObject.getDataStore().getId(); - + private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, + DataObject dataObject, long storagePoolId, long sfAccountId) { final Long minIops; final Long maxIops; final Long volumeSize; @@ -423,12 +425,12 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { // To be backward compatible with releases prior to 4.5, call updateVolumeDetails here. // That way if SolidFireUtil.VOLUME_SIZE wasn't put in the volume_details table when the // volume was initially created, it can be placed in volume_details here. 
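On the updateVolumeDetails(...) call below: throughout this driver, volume_details rows are maintained as a delete-then-insert upsert, one row per (volume, key) pair, and this PR extends the pattern to also record the volume's SCSI NAA device ID. The shape of the pattern, using a hypothetical helper name (upsertVolumeDetail is not in the PR; the DAO calls mirror the diff):

    private void upsertVolumeDetail(long volumeId, String name, String value) {
        // drop any stale row first so exactly one value survives per key
        volumeDetailsDao.removeDetail(volumeId, name);
        volumeDetailsDao.persist(new VolumeDetailVO(volumeId, name, value, false));
    }

    // as used by updateVolumeDetails(volumeId, sfVolumeSize, scsiNaaDeviceId):
    upsertVolumeDetail(volumeId, SolidFireUtil.VOLUME_SIZE, String.valueOf(sfVolumeSize));
    upsertVolumeDetail(volumeId, DiskTO.SCSI_NAA_DEVICE_ID, scsiNaaDeviceId);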
- updateVolumeDetails(volume.getId(), volumeSize); + updateVolumeDetails(volume.getId(), volumeSize, sfVolume.getScsiNaaDeviceId()); usedSpace += volumeSize; } - catch (NumberFormatException ex) { - // can be ignored (the "folder" column didn't have a valid "long" in it (hasn't been placed there yet)) + catch (Exception ex) { + // can be ignored } } } @@ -493,10 +495,15 @@ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO // TemplateInfo sometimes has a size equal to null. long templateSize = templateInfo.getSize() != null ? templateInfo.getSize() : 0; - volumeSize = (long)(templateSize + templateSize * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f)); + if (templateInfo.getHypervisorType() == Hypervisor.HypervisorType.KVM) { + volumeSize = templateSize; + } + else { + volumeSize = (long)(templateSize + templateSize * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f)); + } } - return volumeSize; + return Math.max(volumeSize, SolidFireUtil.MIN_VOLUME_SIZE); } private long getVolumeSizeIncludingHypervisorSnapshotReserve(long volumeSize, Integer hypervisorSnapshotReserve) { @@ -506,7 +513,7 @@ private long getVolumeSizeIncludingHypervisorSnapshotReserve(long volumeSize, In volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f); } - return volumeSize; + return Math.max(volumeSize, SolidFireUtil.MIN_VOLUME_SIZE); } /** @@ -537,7 +544,7 @@ public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool s private final long _maxIops; private final long _burstIops; - public Iops(long minIops, long maxIops, long burstIops) throws IllegalArgumentException { + Iops(long minIops, long maxIops, long burstIops) throws IllegalArgumentException { if (minIops <= 0 || maxIops <= 0) { throw new IllegalArgumentException("The 'Min IOPS' and 'Max IOPS' values must be greater than 0."); } @@ -555,15 +562,15 @@ public Iops(long minIops, long maxIops, long burstIops) throws IllegalArgumentEx _burstIops = burstIops; } - public long getMinIops() { + long getMinIops() { return _minIops; } - public long getMaxIops() { + long getMaxIops() { return _maxIops; } - public long getBurstIops() { + long getBurstIops() { return _burstIops; } } @@ -618,6 +625,11 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet callback.complete(result); } + else { + if (errMsg != null) { + throw new CloudRuntimeException(errMsg); + } + } } private long getCreateSolidFireAccountId(SolidFireUtil.SolidFireConnection sfConnection, long csAccountId, long storagePoolId) { @@ -741,7 +753,7 @@ private boolean shouldTakeSnapshot(long snapshotId) { SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotId, "takeSnapshot"); if (snapshotDetails != null && snapshotDetails.getValue() != null) { - return new Boolean(snapshotDetails.getValue()); + return Boolean.parseBoolean(snapshotDetails.getValue()); } return false; @@ -778,8 +790,8 @@ private boolean shouldTakeSnapshot(long snapshotId) { " and data-object type: " + dataObjectType); } - final long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfNewVolumeName, - getVolumeAttributes(volumeInfo)); + final long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, + SolidFireUtil.getSolidFireVolumeName(sfNewVolumeName), getVolumeAttributes(volumeInfo)); final Iops iops = getIops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), storagePoolId); @@ -826,7 +838,7 @@ private boolean shouldTakeSnapshot(long snapshotId) { 
SolidFireUtil.SolidFireSnapshot sfSnapshot = SolidFireUtil.getSnapshot(sfConnection, sfVolumeId, sfSnapshotId); - long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfSnapshot.getName(), null); + long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, SolidFireUtil.getSolidFireVolumeName(sfSnapshot.getName()), null); snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.STORAGE_POOL_ID); @@ -839,12 +851,17 @@ private boolean shouldTakeSnapshot(long snapshotId) { return SolidFireUtil.getVolume(sfConnection, newSfVolumeId); } - private void updateVolumeDetails(long volumeId, long sfVolumeSize) { + private void updateVolumeDetails(long volumeId, long sfVolumeSize, String scsiNaaDeviceId) { volumeDetailsDao.removeDetail(volumeId, SolidFireUtil.VOLUME_SIZE); + volumeDetailsDao.removeDetail(volumeId, DiskTO.SCSI_NAA_DEVICE_ID); VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeId, SolidFireUtil.VOLUME_SIZE, String.valueOf(sfVolumeSize), false); volumeDetailsDao.persist(volumeDetailVo); + + volumeDetailVo = new VolumeDetailVO(volumeId, DiskTO.SCSI_NAA_DEVICE_ID, scsiNaaDeviceId, false); + + volumeDetailsDao.persist(volumeDetailVo); } @Override @@ -889,7 +906,7 @@ public boolean canCopy(DataObject srcData, DataObject destData) { @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - CreateCmdResult result = null; + CreateCmdResult result; try { VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); @@ -928,9 +945,17 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback 0) { + sfNewSnapshotName = StringUtils.left(volumeInfo.getName(), (volumeInfo.getName().length() - trimRequired)) + "-" + snapshotInfo.getUuid(); + } - updateSnapshotDetails(snapshotInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize); + long sfNewSnapshotId = SolidFireUtil.createSnapshot(sfConnection, sfVolumeId, SolidFireUtil.getSolidFireVolumeName(sfNewSnapshotName), + getSnapshotAttributes(snapshotInfo)); + + updateSnapshotDetails(snapshotInfo.getId(), volumeInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize); snapshotObjectTo.setPath("SfSnapshotId=" + sfNewSnapshotId); } @@ -941,8 +966,8 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - throw new UnsupportedOperationException("Reverting not supported. 
Create a template or volume based on the snapshot instead."); + public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshot2, AsyncCompletionCallback callback) { + VolumeInfo volumeInfo = snapshot.getBaseVolume(); + + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); + + if (volumeVO == null || volumeVO.getRemoved() != null) { + String errMsg = "The volume that the snapshot belongs to no longer exists."; + + CommandResult commandResult = new CommandResult(); + + commandResult.setResult(errMsg); + + callback.complete(commandResult); + + return; + } + + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(volumeVO.getPoolId(), storagePoolDetailsDao); + + long sfVolumeId = Long.parseLong(volumeInfo.getFolder()); + + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.SNAPSHOT_ID); + + long sfSnapshotId = Long.parseLong(snapshotDetails.getValue()); + + SolidFireUtil.rollBackVolumeToSnapshot(sfConnection, sfVolumeId, sfSnapshotId); + + SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId); + + updateVolumeDetails(volumeVO.getId(), sfVolume.getTotalSize(), sfVolume.getScsiNaaDeviceId()); + + CommandResult commandResult = new CommandResult(); + + callback.complete(commandResult); } @Override @@ -1338,65 +1403,80 @@ public void resize(DataObject dataObject, AsyncCompletionCallback hsr) { + Integer hsr = volumeInfo.getHypervisorSnapshotReserve(); + + if (payload.newSize != null || payload.newHypervisorSnapshotReserve != null) { + if (payload.newHypervisorSnapshotReserve != null) { + if (hsr != null) { + if (payload.newHypervisorSnapshotReserve > hsr) { + hsr = payload.newHypervisorSnapshotReserve; + } + } else { hsr = payload.newHypervisorSnapshotReserve; } } - else { - hsr = payload.newHypervisorSnapshotReserve; - } + + sfNewVolumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(newSize, hsr); } - sfNewVolumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(payload.newSize, hsr); - } + Map mapAttributes = new HashMap<>(); - Map mapAttributes = new HashMap<>(); + mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId())); + mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize)); - mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId())); - mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize)); + long newMaxIops = payload.newMaxIops != null ? payload.newMaxIops : sfVolume.getMaxIops(); - SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes, - payload.newMinIops, payload.newMaxIops, getDefaultBurstIops(storagePoolId, payload.newMaxIops)); + SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes, + newMinIops, newMaxIops, getDefaultBurstIops(storagePoolId, newMaxIops)); - VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); - volume.setMinIops(payload.newMinIops); - volume.setMaxIops(payload.newMaxIops); - volume.setHypervisorSnapshotReserve(hsr); + volume.setMinIops(newMinIops); + volume.setMaxIops(newMaxIops); + volume.setHypervisorSnapshotReserve(hsr); - volumeDao.update(volume.getId(), volume); + volumeDao.update(volume.getId(), volume); - // SolidFireUtil.VOLUME_SIZE was introduced in 4.5. 
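The resize(...) hunk here is reorganized around a try/catch/finally so the async callback is completed exactly once, on success or on failure. Reduced to its skeleton (names as in the diff; the body is elided):

    String errMsg = null;

    try {
        // validate IOPS, call SolidFireUtil.modifyVolume(...), then update the
        // CloudStack volume row and its volume_details entries
    } catch (Exception ex) {
        errMsg = ex.getMessage();
    } finally {
        // the operation succeeded iff no error message was recorded
        CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));

        result.setResult(errMsg);

        callback.complete(result);
    }

Without the finally, an exception thrown mid-resize would leave the caller blocked on a callback that never completes.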
- updateVolumeDetails(volume.getId(), sfNewVolumeSize); - } else { - errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize"; + // SolidFireUtil.VOLUME_SIZE was introduced in 4.5. + updateVolumeDetails(volume.getId(), sfNewVolumeSize, sfVolume.getScsiNaaDeviceId()); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize"; + } } + catch (Exception ex) { + errMsg = ex.getMessage(); + } + finally { + CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg)); - CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg)); - - result.setResult(errMsg); + result.setResult(errMsg); - callback.complete(result); + callback.complete(result); + } } private void verifySufficientBytesForStoragePool(long requestedBytes, long storagePoolId) { @@ -1453,10 +1533,8 @@ private void verifySufficientIopsForStoragePool(long requestedIops, long storage } } - private void verifySufficientIopsForStoragePool(long storagePoolId, long volumeId, long newMinIops) { - VolumeVO volume = volumeDao.findById(volumeId); - - long currentMinIops = volume.getMinIops(); + private void verifySufficientIopsForStoragePool(long storagePoolId, SolidFireUtil.SolidFireVolume sfVolume, long newMinIops) { + long currentMinIops = sfVolume.getMinIops(); long diffInMinIops = newMinIops - currentMinIops; // if the desire is for more IOPS @@ -1488,8 +1566,20 @@ private void deleteSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnectio private void deleteSolidFireSnapshot(SolidFireUtil.SolidFireConnection sfConnection, long csSnapshotId, long sfSnapshotId) { SolidFireUtil.deleteSnapshot(sfConnection, sfSnapshotId); + final long volumeId; + final VolumeVO volume; + SnapshotVO snapshot = snapshotDao.findById(csSnapshotId); - VolumeVO volume = volumeDao.findById(snapshot.getVolumeId()); + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.ORIG_CS_VOLUME_ID); + + if (snapshotDetails != null && snapshotDetails.getValue() != null) { + volumeId = Long.valueOf(snapshotDetails.getValue()); + } + else { + volumeId = snapshot.getVolumeId(); + } + + volume = volumeDao.findById(volumeId); if (volume == null) { // if the CloudStack volume has been deleted List lstSnapshots = getNonDestroyedSnapshots(snapshot.getVolumeId()); @@ -1500,7 +1590,7 @@ private void deleteSolidFireSnapshot(SolidFireUtil.SolidFireConnection sfConnect // The CloudStack volume snapshot has not yet been set to the DESTROYED state, so check to make // sure snapshotVo.getId() != csSnapshotId when determining if any volume snapshots remain for the given CloudStack volume. if (snapshotVo.getId() != csSnapshotId) { - SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotVo.getId(), SolidFireUtil.SNAPSHOT_ID); + snapshotDetails = snapshotDetailsDao.findDetail(snapshotVo.getId(), SolidFireUtil.SNAPSHOT_ID); // We are only interested here in volume snapshots that make use of SolidFire snapshots (as opposed to ones // that make use of SolidFire volumes). 
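Summarizing the deleteSolidFireSnapshot(...) changes around this point: the CloudStack volume a snapshot counts against is now resolved through the originalCloudStackVolumeId snapshot detail when present, falling back to snapshot.getVolumeId() otherwise, and only when that volume row is gone and no other snapshot of it is still backed by a SolidFire snapshot does the code delete the backing SolidFire volume. The lookup in isolation (names from the diff, assembled here for illustration):

    final long volumeId;

    SnapshotDetailsVO origVolumeIdDetail = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.ORIG_CS_VOLUME_ID);

    if (origVolumeIdDetail != null && origVolumeIdDetail.getValue() != null) {
        // this snapshot was originally taken against a different CloudStack volume
        volumeId = Long.valueOf(origVolumeIdDetail.getValue());
    } else {
        volumeId = snapshot.getVolumeId();
    }

    VolumeVO volume = volumeDao.findById(volumeId); // null once the volume has been deleted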
@@ -1511,9 +1601,9 @@ private void deleteSolidFireSnapshot(SolidFireUtil.SolidFireConnection sfConnect } if (lstSnapshots2.isEmpty()) { - volume = volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId()); + VolumeVO volumeToDelete = volumeDao.findByIdIncludingRemoved(volumeId); - SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volume.getFolder())); + SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volumeToDelete.getFolder())); } } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index 42fe0256876..38c8c240094 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; @@ -41,7 +42,10 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; @@ -57,10 +61,13 @@ import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.utils.exception.CloudRuntimeException; +import com.google.common.base.Preconditions; + public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class); @Inject private CapacityManager _capacityMgr; + @Inject private ClusterDao _clusterDao; @Inject private DataCenterDao _zoneDao; @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private PrimaryDataStoreHelper _dataStoreHelper; @@ -77,6 +84,8 @@ public DataStore initialize(Map dsInfos) { String url = (String)dsInfos.get("url"); Long zoneId = (Long)dsInfos.get("zoneId"); + Long podId = (Long)dsInfos.get("podId"); + Long clusterId = (Long)dsInfos.get("clusterId"); String storagePoolName = (String)dsInfos.get("name"); String providerName = (String)dsInfos.get("providerName"); Long capacityBytes = (Long)dsInfos.get("capacityBytes"); @@ -85,6 +94,14 @@ public DataStore initialize(Map dsInfos) { @SuppressWarnings("unchecked") Map details = (Map)dsInfos.get("details"); + if (podId != null && clusterId == null) { + throw new CloudRuntimeException("If the Pod ID is specified, the Cluster ID must also be specified."); + } + + if (podId == null && clusterId != null) { + throw new CloudRuntimeException("If the Pod ID is not specified, the Cluster ID must also not be specified."); + } + String storageVip = SolidFireUtil.getStorageVip(url); int storagePort = SolidFireUtil.getStoragePort(url); @@ -104,13 +121,26 @@ public DataStore 
initialize(Map dsInfos) { parameters.setType(StoragePoolType.Iscsi); parameters.setUuid(UUID.randomUUID().toString()); parameters.setZoneId(zoneId); + parameters.setPodId(podId); + parameters.setClusterId(clusterId); parameters.setName(storagePoolName); parameters.setProviderName(providerName); parameters.setManaged(true); parameters.setCapacityBytes(capacityBytes); parameters.setUsedBytes(0); parameters.setCapacityIops(capacityIops); - parameters.setHypervisorType(HypervisorType.Any); + + if (clusterId != null) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + + Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster"); + + parameters.setHypervisorType(clusterVO.getHypervisorType()); + } + else { + parameters.setHypervisorType(HypervisorType.Any); + } + parameters.setTags(tags); parameters.setDetails(details); @@ -166,11 +196,26 @@ public DataStore initialize(Map dsInfos) { ". Exception: " + ex); } + if (lClusterDefaultMinIops < SolidFireUtil.MIN_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be greater than or equal to " + + SolidFireUtil.MIN_IOPS_PER_VOLUME + "."); + } + + if (lClusterDefaultMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to " + + SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME + "."); + } + if (lClusterDefaultMinIops > lClusterDefaultMaxIops) { throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + "'."); } + if (lClusterDefaultMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + "' must be less than or equal to " + + SolidFireUtil.MAX_IOPS_PER_VOLUME + "."); + } + if (Float.compare(fClusterDefaultBurstIopsPercentOfMaxIops, 1.0f) < 0) { throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + "' must be greater than or equal to 1."); } @@ -186,23 +231,35 @@ public DataStore initialize(Map dsInfos) { // do not implement this method for SolidFire's plug-in @Override public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - return true; // should be ignored for zone-wide-only plug-ins like SolidFire's + return true; } - // do not implement this method for SolidFire's plug-in @Override - public boolean attachCluster(DataStore store, ClusterScope scope) { - return true; // should be ignored for zone-wide-only plug-ins like SolidFire's + public boolean attachCluster(DataStore dataStore, ClusterScope scope) { + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore; + + List hosts = + _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); + + for (HostVO host : hosts) { + try { + _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + } + } + + _dataStoreHelper.attachCluster(dataStore); + + return true; } @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - _dataStoreHelper.attachZone(dataStore); - List xenServerHosts = 
_resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - List hosts = new ArrayList(); + List hosts = new ArrayList<>(); hosts.addAll(xenServerHosts); hosts.addAll(vmWareServerHosts); @@ -216,6 +273,8 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h } } + _dataStoreHelper.attachZone(dataStore); + return true; } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index e17219c207d..3172b1af5b4 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -47,6 +47,7 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; @@ -90,7 +91,7 @@ @Inject private StoragePoolAutomation _storagePoolAutomation; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Inject private StoragePoolHostDao _storagePoolHostDao; - @Inject protected TemplateManager _tmpltMgr; + @Inject private TemplateManager _tmpltMgr; // invoked to add primary storage that is based on the SolidFire plug-in @Override @@ -168,6 +169,10 @@ public DataStore initialize(Map dsInfos) { details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername); details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword); + if (capacityBytes < SolidFireUtil.MIN_VOLUME_SIZE) { + capacityBytes = SolidFireUtil.MIN_VOLUME_SIZE; + } + long lMinIops = 100; long lMaxIops = 15000; long lBurstIops = 15000; @@ -214,8 +219,16 @@ public DataStore initialize(Map dsInfos) { throw new CloudRuntimeException("The parameter '" + CAPACITY_IOPS + "' must be equal to the parameter '" + SolidFireUtil.MIN_IOPS + "'."); } - if (lMinIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) { - throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS."); + if (lMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("This volume's Min IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) + " IOPS."); + } + + if (lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("This volume's Max IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS."); + } + + if (lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("This volume's Burst IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS."); } details.put(SolidFireUtil.MIN_IOPS, 
String.valueOf(lMinIops)); @@ -282,7 +295,9 @@ public DataStore initialize(Map dsInfos) { SolidFireUtil.updateCsDbWithSolidFireAccountInfo(csAccount.getId(), sfAccount, dataStore.getId(), _accountDetailsDao); } catch (Exception ex) { - _primaryDataStoreDao.expunge(dataStore.getId()); + if (dataStore != null) { + _primaryDataStoreDao.expunge(dataStore.getId()); + } throw new CloudRuntimeException(ex.getMessage()); } @@ -320,7 +335,7 @@ private StoragePoolType getStorageType(HypervisorType hypervisorType) { private final SolidFireUtil.SolidFireVolume _sfVolume; private final SolidFireUtil.SolidFireAccount _sfAccount; - public SolidFireCreateVolume(SolidFireUtil.SolidFireVolume sfVolume, SolidFireUtil.SolidFireAccount sfAccount) { + SolidFireCreateVolume(SolidFireUtil.SolidFireVolume sfVolume, SolidFireUtil.SolidFireAccount sfAccount) { _sfVolume = sfVolume; _sfAccount = sfAccount; } @@ -394,7 +409,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { throw new CloudRuntimeException("Unable to create storage in cluster " + primaryDataStoreInfo.getClusterId()); } - List poolHosts = new ArrayList(); + List poolHosts = new ArrayList<>(); for (HostVO host : allHosts) { try { @@ -427,7 +442,7 @@ private boolean createStoragePool(HostVO host, StoragePool storagePool) { if (HypervisorType.VMware.equals(hypervisorType)) { cmd.setCreateDatastore(true); - Map details = new HashMap(); + Map details = new HashMap<>(); StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.DATASTORE_NAME); @@ -455,7 +470,7 @@ private boolean createStoragePool(HostVO host, StoragePool storagePool) { } else { _primaryDataStoreDao.expunge(storagePool.getId()); - String msg = ""; + final String msg; if (answer != null) { msg = "Cannot create storage pool through host '" + hostId + "' due to the following: " + answer.getDetails(); @@ -514,6 +529,7 @@ public boolean deleteDataStore(DataStore dataStore) { } Long clusterId = null; + Long hostId = null; for (StoragePoolHostVO host : hostPoolRecords) { DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(storagePool); @@ -521,7 +537,7 @@ public boolean deleteDataStore(DataStore dataStore) { if (HypervisorType.VMware.equals(hypervisorType)) { deleteCmd.setRemoveDatastore(true); - Map details = new HashMap(); + Map details = new HashMap<>(); StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.DATASTORE_NAME); @@ -551,12 +567,18 @@ public boolean deleteDataStore(DataStore dataStore) { if (hostVO != null) { clusterId = hostVO.getClusterId(); + hostId = hostVO.getId(); } break; } else { - s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult()); + if (answer != null) { + s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult()); + } + else { + s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId()); + } } } @@ -582,11 +604,60 @@ public boolean deleteDataStore(DataStore dataStore) { } } + if (hostId != null) { + handleTargetsForVMware(hostId, storagePool.getId()); + } + deleteSolidFireVolume(storagePool.getId()); return _primaryDataStoreHelper.deletePrimaryDataStore(dataStore); } + private void handleTargetsForVMware(long hostId, long storagePoolId) { + HostVO host = _hostDao.findById(hostId); + + if (host.getHypervisorType() == HypervisorType.VMware) { + String storageAddress = 
_storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP).getValue(); + int storagePort = Integer.parseInt(_storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT).getValue()); + String iqn = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN).getValue(); + + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + List> targets = new ArrayList<>(); + + Map target = new HashMap<>(); + + target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress); + target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort)); + target.put(ModifyTargetsCommand.IQN, iqn); + + targets.add(target); + + cmd.setTargets(targets); + cmd.setApplyToAllHostsInCluster(true); + cmd.setAdd(false); + cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); + cmd.setRemoveAsync(true); + + sendModifyTargetsCommand(cmd, hostId); + } + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + String msg = "Unable to get an answer to the modify targets command"; + + s_logger.warn(msg); + } + else if (!answer.getResult()) { + String msg = "Unable to modify target on the following host: " + hostId; + + s_logger.warn(msg); + } + } + private void removeVolumeFromVag(long storagePoolId, long clusterId) { long sfVolumeId = getVolumeId(storagePoolId); ClusterDetailsVO clusterDetail = _clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId)); @@ -671,8 +742,8 @@ public void updateStoragePool(StoragePool storagePool, Map detai long burstIops = currentBurstIops; if (capacityIops != null) { - if (capacityIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) { - throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS."); + if (capacityIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) { + throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) + " IOPS."); } float maxPercentOfMin = currentMaxIops / (float)currentMinIops; diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index 21a7fade52a..f9c27e9d840 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -82,7 +82,7 @@ public boolean hostAdded(long hostId) { SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.PROVIDER_NAME, _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); - handleVMware(host, true); + handleVMware(host, true, ModifyTargetsCommand.TargetTypeToRemove.NEITHER); return true; } @@ -122,10 +122,6 @@ public boolean hostDisconnected(long hostId, long storagePoolId) { @Override public boolean hostAboutToBeRemoved(long hostId) { - HostVO host = _hostDao.findById(hostId); - - handleVMware(host, false); - return true; } @@ -134,6 +130,10 @@ public boolean hostRemoved(long hostId, long clusterId) { SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.PROVIDER_NAME, _clusterDao, _clusterDetailsDao, _storagePoolDao, 
_storagePoolDetailsDao, _hostDao); + HostVO host = _hostDao.findById(hostId); + + handleVMware(host, false, ModifyTargetsCommand.TargetTypeToRemove.BOTH); + return true; } @@ -151,7 +151,7 @@ private void handleXenServer(long clusterId, long hostId, long storagePoolId) { } } - private void handleVMware(HostVO host, boolean add) { + private void handleVMware(HostVO host, boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove) { if (HypervisorType.VMware.equals(host.getHypervisorType())) { List storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.PROVIDER_NAME); @@ -166,8 +166,9 @@ private void handleVMware(HostVO host, boolean add) { ModifyTargetsCommand cmd = new ModifyTargetsCommand(); - cmd.setAdd(add); cmd.setTargets(targets); + cmd.setAdd(add); + cmd.setTargetTypeToRemove(targetTypeToRemove); sendModifyTargetsCommand(cmd, host.getId()); } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java index 29c39483d11..66aafacdbfd 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java @@ -76,7 +76,7 @@ public boolean hostAdded(long hostId) { SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.SHARED_PROVIDER_NAME, clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao); - handleVMware(hostId, true); + handleVMware(hostId, true, ModifyTargetsCommand.TargetTypeToRemove.NEITHER); return true; } @@ -121,20 +121,22 @@ public boolean hostDisconnected(long hostId, long storagePoolId) { @Override public boolean hostAboutToBeRemoved(long hostId) { - handleVMware(hostId, false); + HostVO host = hostDao.findById(hostId); + + SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), false, SolidFireUtil.SHARED_PROVIDER_NAME, + clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao); + + handleVMware(hostId, false, ModifyTargetsCommand.TargetTypeToRemove.BOTH); return true; } @Override public boolean hostRemoved(long hostId, long clusterId) { - SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.SHARED_PROVIDER_NAME, - clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao); - return true; } - private void handleVMware(long hostId, boolean add) { + private void handleVMware(long hostId, boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove) { HostVO host = hostDao.findById(hostId); if (HypervisorType.VMware.equals(host.getHypervisorType())) { @@ -172,8 +174,10 @@ private void handleVMware(long hostId, boolean add) { if (targets.size() > 0) { ModifyTargetsCommand cmd = new ModifyTargetsCommand(); - cmd.setAdd(add); cmd.setTargets(targets); + cmd.setAdd(add); + cmd.setTargetTypeToRemove(targetTypeToRemove); + cmd.setRemoveAsync(true); sendModifyTargetsCommand(cmd, hostId); } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index f5b67a78bac..81adf4b343e 100644 --- 
a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -65,6 +65,7 @@ import com.solidfire.element.api.ModifyVolumeAccessGroupRequest; import com.solidfire.element.api.ModifyVolumeRequest; import com.solidfire.element.api.QoS; +import com.solidfire.element.api.RollbackToSnapshotRequest; import com.solidfire.element.api.Snapshot; import com.solidfire.element.api.SolidFireElement; import com.solidfire.element.api.Volume; @@ -115,6 +116,8 @@ public static final String CloudStackTemplateId = "CloudStackTemplateId"; public static final String CloudStackTemplateSize = "CloudStackTemplateSize"; + public static final String ORIG_CS_VOLUME_ID = "originalCloudStackVolumeId"; + public static final String VOLUME_SIZE = "sfVolumeSize"; public static final String STORAGE_POOL_ID = "sfStoragePoolId"; @@ -124,6 +127,10 @@ public static final String DATASTORE_NAME = "datastoreName"; public static final String IQN = "iqn"; + public static final long MIN_VOLUME_SIZE = 1000000000; + + public static final long MIN_IOPS_PER_VOLUME = 100; + public static final long MAX_MIN_IOPS_PER_VOLUME = 15000; public static final long MAX_IOPS_PER_VOLUME = 100000; private static final int DEFAULT_MANAGEMENT_PORT = 443; @@ -520,7 +527,8 @@ public static SolidFireVolume getVolume(SolidFireConnection sfConnection, long v Volume volume = getSolidFireElement(sfConnection).listVolumes(request).getVolumes()[0]; return new SolidFireVolume(volume.getVolumeID(), volume.getName(), volume.getIqn(), volume.getAccountID(), volume.getStatus(), - volume.getEnable512e(), volume.getQos().getMinIOPS(), volume.getQos().getMaxIOPS(), volume.getQos().getBurstIOPS(), volume.getTotalSize()); + volume.getEnable512e(), volume.getQos().getMinIOPS(), volume.getQos().getMaxIOPS(), volume.getQos().getBurstIOPS(), + volume.getTotalSize(), volume.getScsiNAADeviceID()); } public static void deleteVolume(SolidFireConnection sfConnection, long volumeId) { @@ -544,9 +552,10 @@ public static void deleteVolume(SolidFireConnection sfConnection, long volumeId) private final long _maxIops; private final long _burstIops; private final long _totalSize; + private final String _scsiNaaDeviceId; SolidFireVolume(long id, String name, String iqn, long accountId, String status, boolean enable512e, - long minIops, long maxIops, long burstIops, long totalSize) { + long minIops, long maxIops, long burstIops, long totalSize, String scsiNaaDeviceId) { _id = id; _name = name; _iqn = "/" + iqn + "/0"; @@ -557,6 +566,7 @@ public static void deleteVolume(SolidFireConnection sfConnection, long volumeId) _maxIops = maxIops; _burstIops = burstIops; _totalSize = totalSize; + _scsiNaaDeviceId = scsiNaaDeviceId; } public long getId() { @@ -599,6 +609,10 @@ public long getTotalSize() { return _totalSize; } + public String getScsiNaaDeviceId() { + return _scsiNaaDeviceId; + } + @Override public int hashCode() { return _iqn.hashCode(); @@ -644,11 +658,13 @@ public static SolidFireSnapshot getSnapshot(SolidFireConnection sfConnection, lo Snapshot[] snapshots = getSolidFireElement(sfConnection).listSnapshots(request).getSnapshots(); String snapshotName = null; + long totalSize = 0; if (snapshots != null) { for (Snapshot snapshot : snapshots) { if (snapshot.getSnapshotID() == snapshotId) { snapshotName = snapshot.getName(); + totalSize = snapshot.getTotalSize(); break; } @@ -659,7 +675,7 @@ public static SolidFireSnapshot 
getSnapshot(SolidFireConnection sfConnection, lo throw new CloudRuntimeException("Could not find SolidFire snapshot ID: " + snapshotId + " for the following SolidFire volume ID: " + volumeId); } - return new SolidFireSnapshot(snapshotId, snapshotName); + return new SolidFireSnapshot(snapshotId, snapshotName, totalSize); } public static void deleteSnapshot(SolidFireConnection sfConnection, long snapshotId) { @@ -670,13 +686,24 @@ public static void deleteSnapshot(SolidFireConnection sfConnection, long snapsho getSolidFireElement(sfConnection).deleteSnapshot(request); } + public static void rollBackVolumeToSnapshot(SolidFireConnection sfConnection, long volumeId, long snapshotId) { + RollbackToSnapshotRequest request = RollbackToSnapshotRequest.builder() + .volumeID(volumeId) + .snapshotID(snapshotId) + .build(); + + getSolidFireElement(sfConnection).rollbackToSnapshot(request); + } + public static class SolidFireSnapshot { private final long _id; private final String _name; + private final long _totalSize; - SolidFireSnapshot(long id, String name) { + SolidFireSnapshot(long id, String name, long totalSize) { _id = id; _name = name; + _totalSize = totalSize; } public long getId() { @@ -686,6 +713,10 @@ public long getId() { public String getName() { return _name; } + + public long getTotalSize() { + return _totalSize; + } } public static long createClone(SolidFireConnection sfConnection, long volumeId, long snapshotId, long accountId, diff --git a/server/src/com/cloud/api/query/ViewResponseHelper.java b/server/src/com/cloud/api/query/ViewResponseHelper.java index 940ae288c42..949bc1733b2 100644 --- a/server/src/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/com/cloud/api/query/ViewResponseHelper.java @@ -293,7 +293,9 @@ else if (vr.getFormat() == ImageFormat.VHD){ vs = ApiDBUtils.getVolumeStatistics(vrData.getPath()); } else if (vr.getFormat() == ImageFormat.OVA){ - vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo()); + if (vrData.getChainInfo() != null) { + vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo()); + } } if (vs != null){ long vsz = vs.getVirtualSize(); diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 16be9cee799..b2c44600a78 100644 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -899,7 +899,14 @@ "0", "Default disk I/O writerate in bytes per second allowed in User vm's disk.", null), - + KvmAutoConvergence( + "Advanced", + ManagementServer.class, + Boolean.class, + "kvm.auto.convergence", + "false", + "Setting this to 'true' allows KVM to use auto convergence to complete VM migration (libvirt version 1.2.3+ and QEMU version 1.6+)", + null), ControlCidr( "Advanced", ManagementServer.class, diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 6ee0712d909..56c912d29c1 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -1232,16 +1232,29 @@ public boolean deleteEvents(final DeleteEventsCmd cmd) { if (volClusterId != null) { if (!host.getClusterId().equals(volClusterId) || usesLocal) { - if (hasSuitablePoolsForVolume(volume, host, vmProfile)) { - requiresStorageMotion.put(host, true); - } else { + if (storagePool.isManaged()) { + // At the time being, we do not support storage migration of a volume from managed storage unless the managed storage + // is at the zone level and the 
source and target storage pool is the same. + // If the source and target storage pool is the same and it is managed, then we still have to perform a storage migration + // because we need to create a new target volume and copy the contents of the source volume into it before deleting the + // source volume. iterator.remove(); } + else { + if (hasSuitablePoolsForVolume(volume, host, vmProfile)) { + requiresStorageMotion.put(host, true); + } else { + iterator.remove(); + } + } } } else { if (storagePool.isManaged()) { if (srcHost.getClusterId() != host.getClusterId()) { + // If the volume's storage pool is managed and at the zone level, then we still have to perform a storage migration + // because we need to create a new target volume and copy the contents of the source volume into it before deleting + // the source volume. requiresStorageMotion.put(host, true); } } @@ -1388,12 +1401,19 @@ private boolean hasSuitablePoolsForVolume(final VolumeVO volume, final Host host final StoragePoolVO srcVolumePool = _poolDao.findById(volume.getPoolId()); // Get all the pools available. Only shared pools are considered because only a volume on a shared pools // can be live migrated while the virtual machine stays on the same host. - List<StoragePoolVO> storagePools = null; - if (srcVolumePool.getClusterId() == null) { - storagePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null); - } else { + + List<StoragePoolVO> storagePools; + + if (srcVolumePool.getClusterId() != null) { storagePools = _poolDao.findPoolsByTags(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null); } + else { + storagePools = new ArrayList<>(); + } + + List<StoragePoolVO> zoneWideStoragePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null); + + storagePools.addAll(zoneWideStoragePools); storagePools.remove(srcVolumePool); for (final StoragePoolVO pool : storagePools) { diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index ac20f0f36ee..724ca8a4745 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -40,8 +40,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.hypervisor.Hypervisor; - import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.log4j.Logger; @@ -104,6 +102,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; +import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; @@ -143,6 +142,7 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; import com.cloud.offering.DiskOffering; @@ -846,6 +846,32 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } + @Override + public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) { + final Map<String, String> details = new HashMap<>(); + + details.put(DeleteStoragePoolCommand.DATASTORE_NAME, iScsiName); +
details.put(DeleteStoragePoolCommand.IQN, iScsiName); + details.put(DeleteStoragePoolCommand.STORAGE_HOST, storagePool.getHostAddress()); + details.put(DeleteStoragePoolCommand.STORAGE_PORT, String.valueOf(storagePool.getPort())); + + final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(); + + cmd.setDetails(details); + cmd.setRemoveDatastore(true); + + final Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null || !answer.getResult()) { + String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + } + @Override @DB public boolean deletePool(DeletePoolCmd cmd) { @@ -1237,41 +1263,54 @@ public void cleanupStorage(boolean recurring) { } } + /** + * This method only applies for managed storage. + * + * For XenServer and vSphere, see if we need to remove an SR or a datastore, then remove the underlying volume + * from any applicable access control list (before other code attempts to delete the volume that supports it). + * + * For KVM, just tell the underlying storage plug-in to remove the volume from any applicable access control list + * (before other code attempts to delete the volume that supports it). + */ private void handleManagedStorage(Volume volume) { Long instanceId = volume.getInstanceId(); - // The idea of this "if" statement is to see if we need to remove an SR/datastore before - // deleting the volume that supports it on a SAN. This only applies for managed storage. if (instanceId != null) { StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); if (storagePool != null && storagePool.isManaged()) { - DataTO volTO = volFactory.getVolume(volume.getId()).getTO(); - DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType()); + VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId); - DettachCommand cmd = new DettachCommand(disk, null); + Long lastHostId = vmInstanceVO.getLastHostId(); - cmd.setManaged(true); + if (lastHostId != null) { + HostVO host = _hostDao.findById(lastHostId); + ClusterVO cluster = _clusterDao.findById(host.getClusterId()); + VolumeInfo volumeInfo = volFactory.getVolume(volume.getId()); - cmd.setStorageHost(storagePool.getHostAddress()); - cmd.setStoragePort(storagePool.getPort()); + if (cluster.getHypervisorType() == HypervisorType.KVM) { + volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); + } + else { + DataTO volTO = volFactory.getVolume(volume.getId()).getTO(); + DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType()); - cmd.set_iScsiName(volume.get_iScsiName()); + DettachCommand cmd = new DettachCommand(disk, null); - VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId); + cmd.setManaged(true); - Long lastHostId = vmInstanceVO.getLastHostId(); + cmd.setStorageHost(storagePool.getHostAddress()); + cmd.setStoragePort(storagePool.getPort()); - if (lastHostId != null) { - Answer answer = _agentMgr.easySend(lastHostId, cmd); + cmd.set_iScsiName(volume.get_iScsiName()); - if (answer != null && answer.getResult()) { - VolumeInfo volumeInfo = volFactory.getVolume(volume.getId()); - HostVO host = _hostDao.findById(lastHostId); + Answer answer = _agentMgr.easySend(lastHostId, cmd); - volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); - } else { - s_logger.warn("Unable to remove host-side clustered 
file system for the following volume: " + volume.getUuid()); + if (answer != null && answer.getResult()) { + volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); + } else { + s_logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid()); + } } } } diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index 224fa78370f..bea9b4ad5bf 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -18,6 +18,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.api.ApiDBUtils; @@ -475,7 +476,17 @@ public VolumeVO doInTransaction(TransactionStatus status) { } } else { volume.setDiskOfferingId(diskOfferingId); + + DiskOfferingVO diskOfferingVO = _diskOfferingDao.findById(diskOfferingId); + + Boolean isCustomizedIops = diskOfferingVO != null && diskOfferingVO.isCustomizedIops() != null ? diskOfferingVO.isCustomizedIops() : false; + + if (isCustomizedIops == null || !isCustomizedIops) { + volume.setMinIops(diskOfferingVO.getMinIops()); + volume.setMaxIops(diskOfferingVO.getMaxIops()); + } } + // volume.setSize(size); volume.setInstanceId(null); volume.setUpdated(new Date()); @@ -845,10 +856,10 @@ protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId, Lo @DB @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationException { - Long newSize = null; - Long newMinIops = null; - Long newMaxIops = null; - Integer newHypervisorSnapshotReserve = null; + Long newSize; + Long newMinIops; + Long newMaxIops; + Integer newHypervisorSnapshotReserve; boolean shrinkOk = cmd.getShrinkOk(); VolumeVO volume = _volsDao.findById(cmd.getEntityId()); @@ -865,13 +876,6 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep /* Does the caller have authority to act on this volume? 
*/ _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume); - if(volume.getInstanceId() != null) { - // Check that Vm to which this volume is attached does not have VM Snapshots - if (_vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) { - throw new InvalidParameterValueException("Volume cannot be resized which is attached to VM with VM Snapshots"); - } - } - DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); DiskOfferingVO newDiskOffering = null; @@ -916,7 +920,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMinIops = cmd.getMinIops(); if (newMinIops != null) { - if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) { + if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) { throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter."); } } @@ -928,7 +932,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMaxIops = cmd.getMaxIops(); if (newMaxIops != null) { - if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) { + if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) { throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter."); } } @@ -967,9 +971,13 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep throw new InvalidParameterValueException("The new disk offering requires that a size be specified."); } - // convert from bytes to GiB + // convert from GiB to bytes newSize = newSize << 30; } else { + if (cmd.getSize() != null) { + throw new InvalidParameterValueException("You cannnot pass in a custom disk size to a non-custom disk offering."); + } + newSize = newDiskOffering.getDiskSize(); } @@ -996,10 +1004,35 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep // if the caller is looking to change the size of the volume if (currentSize != newSize) { + if (volume.getInstanceId() != null) { + // Check that VM to which this volume is attached does not have VM snapshots + if (_vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) { + throw new InvalidParameterValueException("A volume that is attached to a VM with any VM snapshots cannot be resized."); + } + } + if (!validateVolumeSizeRange(newSize)) { throw new InvalidParameterValueException("Requested size out of range"); } + Long storagePoolId = volume.getPoolId(); + + if (storagePoolId != null) { + StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); + + if (storagePoolVO.isManaged()) { + Long instanceId = volume.getInstanceId(); + + if (instanceId != null) { + VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId); + + if (vmInstanceVO.getHypervisorType() == HypervisorType.KVM && vmInstanceVO.getState() != State.Stopped) { + throw new CloudRuntimeException("This kind of KVM disk cannot be resized while it is connected to a VM that's not in the Stopped state."); + } + } + } + } + /* * Let's make certain they (think they) know what they're doing if they * want to shrink by forcing them to provide the shrinkok parameter. 
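For reference, the "newSize = newSize << 30" conversion in the hunk above works because 1 GiB is 2^30 bytes, so shifting the GiB count left by 30 bits multiplies it by 1,073,741,824. A minimal standalone sketch of the same arithmetic (the class name is illustrative only, not part of this patch):

    public final class ResizeMath {
        // 1 GiB = 2^30 bytes, so a left shift by 30 converts GiB to bytes.
        static long gibToBytes(long sizeInGib) {
            return sizeInGib << 30;
        }

        public static void main(String[] args) {
            System.out.println(gibToBytes(8)); // prints 8589934592
        }
    }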
@@ -1161,19 +1194,32 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n // this call to resize has a different impact depending on whether the // underlying primary storage is managed or not - // if managed, this is the chance for the plug-in to change IOPS value, if applicable + // if managed, this is the chance for the plug-in to change the size and/or IOPS values // if not managed, this is the chance for the plug-in to talk to the hypervisor layer // to change the size of the disk AsyncCallFuture<VolumeApiResult> future = volService.resize(vol); VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.warn("Failed to resize the volume " + volume); + String details = ""; + if (result.getResult() != null && !result.getResult().isEmpty()) { + details = result.getResult(); + } + throw new CloudRuntimeException(details); + } + // managed storage is designed in such a way that the storage plug-in does not // talk to the hypervisor layer; as such, if the storage is managed and the // current and new sizes are different, then CloudStack (i.e. not a storage plug-in) // needs to tell the hypervisor to resize the disk if (storagePool.isManaged() && currentSize != newSize) { if (hosts != null && hosts.length > 0) { - volService.resizeVolumeOnHypervisor(volumeId, newSize, hosts[0], instanceName); + HostVO hostVO = _hostDao.findById(hosts[0]); + + if (hostVO.getHypervisorType() != HypervisorType.KVM) { + volService.resizeVolumeOnHypervisor(volumeId, newSize, hosts[0], instanceName); + } } volume.setSize(newSize); @@ -1181,20 +1227,12 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n _volsDao.update(volume.getId(), volume); } - if (result.isFailed()) { - s_logger.warn("Failed to resize the volume " + volume); - String details = ""; - if (result.getResult() != null && !result.getResult().isEmpty()) { - details = result.getResult(); - } - throw new CloudRuntimeException(details); - } - volume = _volsDao.findById(volume.getId()); if (newDiskOfferingId != null) { volume.setDiskOfferingId(newDiskOfferingId); } + if (currentSize != newSize) { volume.setSize(newSize); } @@ -1851,6 +1889,8 @@ private Volume orchestrateDetachVolumeFromVM(long vmId, long volumeId) { volService.revokeAccess(volFactory.getVolume(volume.getId()), host, dataStore); + handleTargetsForVMware(hostId, volumePool.getHostAddress(), volumePool.getPort(), volume.get_iScsiName()); + return _volsDao.findById(volumeId); } else { @@ -1885,6 +1925,46 @@ public void updateMissingRootDiskController(final VMInstanceVO vm, final String } } + private void handleTargetsForVMware(long hostId, String storageAddress, int storagePort, String iScsiName) { + HostVO host = _hostDao.findById(hostId); + + if (host.getHypervisorType() == HypervisorType.VMware) { + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + List<Map<String, String>> targets = new ArrayList<>(); + + Map<String, String> target = new HashMap<>(); + + target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress); + target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort)); + target.put(ModifyTargetsCommand.IQN, iScsiName); + + targets.add(target); + + cmd.setTargets(targets); + cmd.setApplyToAllHostsInCluster(true); + cmd.setAdd(false); + cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); + + sendModifyTargetsCommand(cmd, hostId); + } + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + String msg
= "Unable to get an answer to the modify targets command"; + + s_logger.warn(msg); + } + else if (!answer.getResult()) { + String msg = "Unable to modify target on the following host: " + hostId; + + s_logger.warn(msg); + } + } + @DB @Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_MIGRATE, eventDescription = "migrating volume", async = true) @@ -2088,6 +2168,7 @@ public Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Acco } StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId()); + if (storagePoolVO.isManaged() && locationType == null) { locationType = Snapshot.LocationType.PRIMARY; } @@ -2206,6 +2287,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, } StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId()); + if (!storagePoolVO.isManaged() && locationType != null) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " LocationType is supported only for managed storage"); } @@ -2295,7 +2377,7 @@ public String extractVolume(ExtractVolumeCmd cmd) { throw new InvalidParameterValueException("Please specify a valid zone."); } if (volume.getPoolId() == null) { - throw new InvalidParameterValueException("The volume doesnt belong to a storage pool so cant extract it"); + throw new InvalidParameterValueException("The volume doesn't belong to a storage pool so can't extract it"); } // Extract activity only for detached volumes or for volumes whose // instance is stopped @@ -2419,9 +2501,15 @@ private String orchestrateExtractVolume(long volumeId, long zoneId) { String extractUrl = secStore.createEntityExtractUrl(vol.getPath(), vol.getFormat(), vol); VolumeDataStoreVO volumeStoreRef = _volumeStoreDao.findByVolume(volumeId); + volumeStoreRef.setExtractUrl(extractUrl); volumeStoreRef.setExtractUrlCreated(DateUtil.now()); + volumeStoreRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + volumeStoreRef.setDownloadPercent(100); + volumeStoreRef.setZoneId(zoneId); + _volumeStoreDao.update(volumeStoreRef.getId(), volumeStoreRef); + return extractUrl; } @@ -2630,6 +2718,7 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L // Mark the volume as attached if (sendCommand) { DiskTO disk = answer.getDisk(); + _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), disk.getDiskSeq()); volumeToAttach = _volsDao.findById(volumeToAttach.getId()); @@ -2643,6 +2732,14 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L deviceId = getDeviceId(vm, deviceId); _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), deviceId); + + volumeToAttach = _volsDao.findById(volumeToAttach.getId()); + + if (vm.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) { + volumeToAttach.setPath(volumeToAttach.get_iScsiName()); + + _volsDao.update(volumeToAttach.getId(), volumeToAttach); + } } // insert record for disk I/O statistics diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 5e981464672..bd49c05f43e 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -291,7 +291,9 @@ public Snapshot revertSnapshot(Long snapshotId) { } } - SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Image); + DataStoreRole dataStoreRole = getDataStoreRole(snapshot, 
_snapshotStoreDao, dataStoreMgr); + + SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotId, dataStoreRole); if (snapshotInfo == null) { throw new CloudRuntimeException("snapshot:" + snapshotId + " not exist in data store"); } @@ -548,6 +550,10 @@ public boolean deleteSnapshot(long snapshotId) { return false; } + DataStoreRole dataStoreRole = getDataStoreRole(snapshotCheck, _snapshotStoreDao, dataStoreMgr); + + SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, dataStoreRole); + try { boolean result = snapshotStrategy.deleteSnapshot(snapshotId); @@ -562,8 +568,6 @@ public boolean deleteSnapshot(long snapshotId) { } if (snapshotCheck.getState() == Snapshot.State.BackedUp) { - SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Image); - if (snapshotStoreRef != null) { _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.secondary_storage, new Long(snapshotStoreRef.getPhysicalSize())); } @@ -1396,8 +1400,16 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, hypervisorType = storagePool.getHypervisor(); // at the time being, managed storage only supports XenServer, ESX(i), and KVM (i.e. not Hyper-V), so the VHD file type can be mapped to XenServer - if (storagePool.isManaged() && HypervisorType.Any.equals(hypervisorType) && ImageFormat.VHD.equals(volume.getFormat())) { - hypervisorType = HypervisorType.XenServer; + if (storagePool.isManaged() && HypervisorType.Any.equals(hypervisorType)) { + if (ImageFormat.VHD.equals(volume.getFormat())) { + hypervisorType = HypervisorType.XenServer; + } + else if (ImageFormat.OVA.equals(volume.getFormat())) { + hypervisorType = HypervisorType.VMware; + } + else if (ImageFormat.QCOW2.equals(volume.getFormat())) { + hypervisorType = HypervisorType.KVM; + } } } else { hypervisorType = volume.getHypervisorType(); diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 0470874f213..dab741c3c27 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -70,6 +70,7 @@ import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity; import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMNetworkMapDao; @@ -109,6 +110,7 @@ import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.GetVolumeStatsAnswer; import com.cloud.agent.api.GetVolumeStatsCommand; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.RestoreVMSnapshotAnswer; import com.cloud.agent.api.RestoreVMSnapshotCommand; @@ -1116,6 +1118,20 @@ private UserVm upgradeStoppedVirtualMachine(Long vmId, Long svcOffId, Map<String, String> customParameters) { + + final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vmInstance.getId()); + + for (final VolumeVO rootVolumeOfVm : vols) { + rootVolumeOfVm.setDiskOfferingId(newROOTDiskOffering.getId()); + + _volsDao.update(rootVolumeOfVm.getId(), rootVolumeOfVm); + + ResizeVolumeCmd resizeVolumeCmd = new ResizeVolumeCmd(rootVolumeOfVm.getId(), newROOTDiskOffering.getMinIops(), newROOTDiskOffering.getMaxIops()); + + _volumeService.resizeVolume(resizeVolumeCmd); + } + // Check if the new service offering can be applied to vm instance ServiceOffering newSvcOffering = _offeringDao.findById(svcOffId); Account owner = _accountMgr.getActiveAccountById(vmInstance.getAccountId()); @@ -4955,7 +4971,7 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { List<VolumeVO> vols = _volsDao.findByInstance(vm.getId()); if (vols.size() > 1) { - throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to dettach data disks at first"); + throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first"); } // Check that Vm does not have VM Snapshots @@ -4969,7 +4985,7 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { destPool.getClusterId()).getHypervisorType(); } - if (vm.getHypervisorType() != destHypervisorType) { + if (vm.getHypervisorType() != destHypervisorType && destHypervisorType != HypervisorType.Any) { throw new InvalidParameterValueException("hypervisor is not compatible: dest: " + destHypervisorType.toString() + ", vm: " + vm.getHypervisorType().toString()); } _itMgr.storageMigration(vm.getUuid(), destPool); @@ -5384,13 +5400,47 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio } // Check if the source and destination hosts are of the same type and support storage motion. - if (!(srcHost.getHypervisorType().equals(destinationHost.getHypervisorType()))) { - throw new CloudRuntimeException("The source and destination hosts are not of the same type. " + "Source hypervisor type and version: " - + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " - + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion()); + if (!srcHost.getHypervisorType().equals(destinationHost.getHypervisorType())) { + throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. Source hypervisor type and version: " + + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " + + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion()); + } + + String srcHostVersion = srcHost.getHypervisorVersion(); + String destinationHostVersion = destinationHost.getHypervisorVersion(); + + if (HypervisorType.KVM.equals(srcHost.getHypervisorType())) { + if (srcHostVersion == null) { + srcHostVersion = ""; + } + + if (destinationHostVersion == null) { + destinationHostVersion = ""; + } + } + + if (!srcHostVersion.equals(destinationHostVersion)) { + throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. Source hypervisor type and version: " + + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " + + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion()); } HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion()); + + if (capabilities == null && HypervisorType.KVM.equals(srcHost.getHypervisorType())) { + List<HypervisorCapabilitiesVO> lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); + + if (lstHypervisorCapabilities != null) { + for (HypervisorCapabilitiesVO hypervisorCapabilities : lstHypervisorCapabilities) { + if (hypervisorCapabilities.isStorageMotionSupported()) { + capabilities = hypervisorCapabilities; + + break; + } + } + } + } + if (!capabilities.isStorageMotionSupported()) { throw new CloudRuntimeException("Migration with storage isn't supported on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion()); } @@ -6168,9 +6218,10 @@ public UserVm restoreVMInternal(Account caller, UserVmVO vm, Long newTemplateId) } private void handleManagedStorage(UserVmVO vm, VolumeVO root) { - if ( Volume.State.Allocated.equals(root.getState()) ){ + if (Volume.State.Allocated.equals(root.getState())) { return; } + StoragePoolVO storagePool = _storagePoolDao.findById(root.getPoolId()); if (storagePool != null && storagePool.isManaged()) { @@ -6203,7 +6254,7 @@ else if (host.getHypervisorType() == HypervisorType.VMware) { Map<String, String> details = primaryDataStore.getDetails(); if (details == null) { - details = new HashMap<String, String>(); + details = new HashMap<>(); primaryDataStore.setDetails(details); } @@ -6212,38 +6263,87 @@ else if (host.getHypervisorType() == HypervisorType.VMware) { cmd = new DeleteCommand(volumeInfo.getTO()); } + else if (host.getHypervisorType() == HypervisorType.KVM) { + cmd = null; + } else { throw new CloudRuntimeException("This hypervisor type is not supported on managed storage for this command."); } - Commands cmds = new Commands(Command.OnError.Stop); + if (cmd != null) { + Commands cmds = new Commands(Command.OnError.Stop); - cmds.addCommand(cmd); + cmds.addCommand(cmd); - try { - _agentMgr.send(hostId, cmds); - } - catch (Exception ex) { - throw new CloudRuntimeException(ex.getMessage()); - } + try { + _agentMgr.send(hostId, cmds); + } catch (Exception ex) { + throw new CloudRuntimeException(ex.getMessage()); + } - if (!cmds.isSuccessful()) { - for (Answer answer : cmds.getAnswers()) { - if (!answer.getResult()) { - s_logger.warn("Failed to reset vm due to: " + answer.getDetails()); + if (!cmds.isSuccessful()) { + for (Answer answer : cmds.getAnswers()) { + if (!answer.getResult()) { + s_logger.warn("Failed to reset vm due to: " + answer.getDetails()); - throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails()); + throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails()); + } } } } // root.getPoolId() should be null if the VM we are detaching the disk from has never been started before DataStore dataStore = root.getPoolId() != null ? _dataStoreMgr.getDataStore(root.getPoolId(), DataStoreRole.Primary) : null; + volumeMgr.revokeAccess(volFactory.getVolume(root.getId()), host, dataStore); + + if (dataStore != null) { + handleTargetsForVMware(host.getId(), storagePool.getHostAddress(), storagePool.getPort(), root.get_iScsiName()); + } } } } + private void handleTargetsForVMware(long hostId, String storageAddress, int storagePort, String iScsiName) { + HostVO host = _hostDao.findById(hostId); + + if (host.getHypervisorType() == HypervisorType.VMware) { + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + List<Map<String, String>> targets = new ArrayList<>(); + + Map<String, String> target = new HashMap<>(); + + target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress); + target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort)); + target.put(ModifyTargetsCommand.IQN, iScsiName); + + targets.add(target); + + cmd.setTargets(targets); + cmd.setApplyToAllHostsInCluster(true); + cmd.setAdd(false); + cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); + + sendModifyTargetsCommand(cmd, hostId); + } + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + String msg = "Unable to get an answer to the modify targets command"; + + s_logger.warn(msg); + } + else if (!answer.getResult()) { + String msg = "Unable to modify target on the following host: " + hostId; + + s_logger.warn(msg); + } + } + @Override public void prepareStop(VirtualMachineProfile profile) { UserVmVO vm = _vmDao.findById(profile.getId()); diff --git a/server/test/com/cloud/storage/snapshot/SnapshotManagerTest.java b/server/test/com/cloud/storage/snapshot/SnapshotManagerTest.java index 05eb8b96a3a..39eb703300b 100755 --- a/server/test/com/cloud/storage/snapshot/SnapshotManagerTest.java +++ b/server/test/com/cloud/storage/snapshot/SnapshotManagerTest.java @@ -16,12 +16,6 @@ // under the License.
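The handleTargetsForVMware/sendModifyTargetsCommand pair added above now exists in near-identical form in VolumeApiServiceImpl, UserVmManagerImpl, and the SolidFire host listeners. A sketch of what a shared helper could look like (the helper name buildRemoveTargetCommand is hypothetical; it uses only setters that appear in this diff and assumes the usual java.util imports):

    // Hypothetical consolidation of the duplicated target-removal blocks.
    private static ModifyTargetsCommand buildRemoveTargetCommand(String storageAddress, int storagePort, String iqn) {
        Map<String, String> target = new HashMap<>();

        target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress);
        target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort));
        target.put(ModifyTargetsCommand.IQN, iqn);

        List<Map<String, String>> targets = new ArrayList<>();

        targets.add(target);

        ModifyTargetsCommand cmd = new ModifyTargetsCommand();

        cmd.setTargets(targets);
        cmd.setApplyToAllHostsInCluster(true);
        cmd.setAdd(false);
        cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);

        return cmd;
    }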
package com.cloud.storage.snapshot; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.util.List; import java.util.UUID; @@ -79,6 +73,11 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SnapshotManagerTest { @Spy @@ -126,7 +125,7 @@ @Mock DataStore storeMock; @Mock - SnapshotDataStoreDao _snapshotStoreDao; + SnapshotDataStoreDao snapshotStoreDao; @Mock SnapshotDataStoreVO snapshotStoreMock; @Mock @@ -153,7 +152,7 @@ public void setup() throws ResourceAllocationException { _snapshotMgr._storagePoolDao = _storagePoolDao; _snapshotMgr._resourceMgr = _resourceMgr; _snapshotMgr._vmSnapshotDao = _vmSnapshotDao; - _snapshotMgr._snapshotStoreDao = _snapshotStoreDao; + _snapshotMgr._snapshotStoreDao = snapshotStoreDao; when(_snapshotDao.findById(anyLong())).thenReturn(snapshotMock); when(snapshotMock.getVolumeId()).thenReturn(TEST_VOLUME_ID); @@ -260,6 +259,7 @@ public void testDeleteSnapshotF1() { when(snapshotMock.getState()).thenReturn(Snapshot.State.Destroyed); when(snapshotMock.getAccountId()).thenReturn(2L); when(snapshotMock.getDataCenterId()).thenReturn(2L); + _snapshotMgr.deleteSnapshot(TEST_SNAPSHOT_ID); } @@ -311,14 +311,14 @@ public void testBackupSnapshotFromVmSnapshotF2() { when(_vmDao.findById(anyLong())).thenReturn(vmMock); when(vmMock.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); when(_vmSnapshotDao.findById(anyLong())).thenReturn(vmSnapshotMock); - when(_snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(null); + when(snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(null); when(snapshotFactory.getSnapshot(anyLong(), Mockito.any(DataStore.class))).thenReturn(snapshotInfoMock); when(storeMock.create(snapshotInfoMock)).thenReturn(snapshotInfoMock); - when(_snapshotStoreDao.findBySnapshot(anyLong(), any(DataStoreRole.class))).thenReturn(snapshotStoreMock); - when(_snapshotStoreDao.update(anyLong(), any(SnapshotDataStoreVO.class))).thenReturn(true); + when(snapshotStoreDao.findBySnapshot(anyLong(), any(DataStoreRole.class))).thenReturn(snapshotStoreMock); + when(snapshotStoreDao.update(anyLong(), any(SnapshotDataStoreVO.class))).thenReturn(true); when(_snapshotDao.update(anyLong(), any(SnapshotVO.class))).thenReturn(true); when(vmMock.getAccountId()).thenReturn(2L); - when(snapshotStrategy.backupSnapshot(any(SnapshotInfo.class))).thenReturn(snapshotInfoMock);;; + when(snapshotStrategy.backupSnapshot(any(SnapshotInfo.class))).thenReturn(snapshotInfoMock); Snapshot snapshot = _snapshotMgr.backupSnapshotFromVmSnapshot(TEST_SNAPSHOT_ID, TEST_VM_ID, TEST_VOLUME_ID, TEST_VM_SNAPSHOT_ID); Assert.assertNotNull(snapshot); @@ -330,7 +330,7 @@ public void testBackupSnapshotFromVmSnapshotF3() { when(_vmDao.findById(anyLong())).thenReturn(vmMock); when(vmMock.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); when(_vmSnapshotDao.findById(anyLong())).thenReturn(vmSnapshotMock); - when(_snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(snapshotStoreMock); + 
when(snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(snapshotStoreMock); when(snapshotStoreMock.getInstallPath()).thenReturn("VM_SNAPSHOT_NAME"); when(vmSnapshotMock.getName()).thenReturn("VM_SNAPSHOT_NAME"); Snapshot snapshot = _snapshotMgr.backupSnapshotFromVmSnapshot(TEST_SNAPSHOT_ID, TEST_VM_ID, TEST_VOLUME_ID, TEST_VM_SNAPSHOT_ID); diff --git a/test/integration/plugins/solidfire/TestAddRemoveHosts.py b/test/integration/plugins/solidfire/TestAddRemoveHosts.py index 366c127f9ae..d9118dd6cab 100644 --- a/test/integration/plugins/solidfire/TestAddRemoveHosts.py +++ b/test/integration/plugins/solidfire/TestAddRemoveHosts.py @@ -51,6 +51,9 @@ # Check that ip_address_of_new_xenserver_host / ip_address_of_new_kvm_host is correct. # If using XenServer, verify the "xen_server_master_hostname" variable is correct. # If using KVM, verify the "kvm_1_ip_address" variable is correct. +# +# Note: +# If you do have more than one cluster, you might need to change this line: cls.cluster = list_clusters(cls.apiClient)[0] class TestData: @@ -92,18 +95,18 @@ class TestData: # modify to control which hypervisor type to test hypervisor_type = xenServer xen_server_master_hostname = "XenServer-6.5-1" - kvm_1_ip_address = "192.168.129.84" - ip_address_of_new_xenserver_host = "192.168.129.243" - ip_address_of_new_kvm_host = "192.168.129.3" + kvm_1_ip_address = "10.117.40.112" + ip_address_of_new_xenserver_host = "10.117.40.107" + ip_address_of_new_kvm_host = "10.117.40.116" def __init__(self): self.testdata = { TestData.solidFire: { - TestData.mvip: "192.168.139.112", + TestData.mvip: "10.117.40.120", TestData.username: "admin", TestData.password: "admin", TestData.port: 443, - TestData.url: "https://192.168.139.112:443" + TestData.url: "https://10.117.40.120:443" }, TestData.kvm: { TestData.username: "root", @@ -144,7 +147,7 @@ def __init__(self): TestData.primaryStorage: { TestData.name: "SolidFire-%d" % random.randint(0, 100), TestData.scope: "ZONE", - TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", @@ -157,7 +160,7 @@ def __init__(self): TestData.primaryStorage2: { TestData.name: "SolidFireShared-%d" % random.randint(0, 100), TestData.scope: "CLUSTER", - TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "minIops=5000;maxIops=50000;burstIops=75000", TestData.provider: "SolidFireShared", @@ -191,7 +194,7 @@ def __init__(self): TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, - TestData.url: "192.168.129.50" + TestData.url: "10.117.40.114" } @@ -223,7 +226,7 @@ def setUpClass(cls): # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -753,7 +756,7 @@ def _get_kvm_iqn(self, ip_address, username, password): searchFor = "InitiatorName=" - stdin, stdout, stderr = ssh_connection.exec_command("sudo grep " + searchFor + " 
/etc/iscsi/initiatorname.iscsi") + stdout = ssh_connection.exec_command("sudo grep " + searchFor + " /etc/iscsi/initiatorname.iscsi")[1] result = stdout.read() diff --git a/test/integration/plugins/solidfire/TestManagedSystemVMs.py b/test/integration/plugins/solidfire/TestManagedSystemVMs.py index 2bfbe4aefb8..5e9884d406a 100644 --- a/test/integration/plugins/solidfire/TestManagedSystemVMs.py +++ b/test/integration/plugins/solidfire/TestManagedSystemVMs.py @@ -93,17 +93,17 @@ class TestData(): zoneId = "zoneid" # modify to control which hypervisor type to test - hypervisor_type = kvm + hypervisor_type = xenServer xen_server_hostname = "XenServer-6.5-1" def __init__(self): self.testdata = { TestData.solidFire: { - TestData.mvip: "192.168.139.112", + TestData.mvip: "10.117.40.120", TestData.username: "admin", TestData.password: "admin", TestData.port: 443, - TestData.url: "https://192.168.139.112:443" + TestData.url: "https://10.117.40.120:443" }, TestData.kvm: { TestData.username: "root", @@ -130,7 +130,7 @@ def __init__(self): TestData.primaryStorage: { TestData.name: TestData.get_name_for_solidfire_storage(), TestData.scope: "ZONE", - TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", @@ -184,7 +184,7 @@ def __init__(self): TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, - TestData.url: "192.168.129.50" + TestData.url: "10.117.40.114" } @staticmethod @@ -225,7 +225,7 @@ def setUpClass(cls): # Get Resources from Cloud Infrastructure cls.zone = Zone(get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]).__dict__) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account diff --git a/test/integration/plugins/solidfire/TestSnapshots.py b/test/integration/plugins/solidfire/TestSnapshots.py index e1b9aafc10c..fab509e4b69 100644 --- a/test/integration/plugins/solidfire/TestSnapshots.py +++ b/test/integration/plugins/solidfire/TestSnapshots.py @@ -19,7 +19,6 @@ import random import SignedAPICall import time -import XenAPI from solidfire.factory import ElementFactory @@ -36,7 +35,7 @@ from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume # common - commonly used methods for all tests are listed here -from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes, list_snapshots +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_volumes, list_snapshots # utils - utility classes for common cleanup, external library wrappers, etc. 
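The _get_kvm_iqn helper above shells out over SSH to grep the InitiatorName= entry out of /etc/iscsi/initiatorname.iscsi on the KVM host. For reference, the same extraction as an illustrative Java snippet (the method name parseInitiatorIqn is hypothetical, not part of this patch):

    // Pulls the initiator IQN out of the contents of /etc/iscsi/initiatorname.iscsi,
    // e.g. "InitiatorName=iqn.1993-08.org.debian:01:abcdef" yields
    // "iqn.1993-08.org.debian:01:abcdef".
    static String parseInitiatorIqn(String fileContents) {
        for (String line : fileContents.split("\\R")) {
            String trimmed = line.trim();

            if (trimmed.startsWith("InitiatorName=")) {
                return trimmed.substring("InitiatorName=".length());
            }
        }

        throw new IllegalArgumentException("no InitiatorName entry found");
    }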
from marvin.lib.utils import cleanup_resources, wait_until @@ -76,6 +75,7 @@ class TestData(): user = "user" username = "username" virtualMachine = "virtualmachine" + vmWare = "vmware" volume_1 = "volume_1" volume_2 = "volume_2" xenServer = "xenserver" @@ -87,16 +87,16 @@ class TestData(): def __init__(self): self.testdata = { TestData.solidFire: { - TestData.mvip: "192.168.139.112", + TestData.mvip: "10.117.40.120", TestData.username: "admin", TestData.password: "admin", TestData.port: 443, - TestData.url: "https://192.168.139.112:443" + TestData.url: "https://10.117.40.120:443" }, TestData.primaryStorage: { "name": "SolidFire-%d" % random.randint(0, 100), TestData.scope: "ZONE", - "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "url": "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", @@ -157,7 +157,7 @@ def __init__(self): TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, - TestData.url: "192.168.129.50" + TestData.url: "10.117.40.114" } @@ -176,6 +176,8 @@ class TestSnapshots(cloudstackTestCase): _should_be_six_volumes_in_list_err_msg = "There should be six volumes in this list." _should_be_seven_volumes_in_list_err_msg = "There should be seven volumes in this list." _should_be_five_items_in_list_err_msg = "There should be five items in this list." + _should_be_six_items_in_list_err_msg = "There should be six items in this list." + _should_be_seven_items_in_list_err_msg = "There should be seven items in this list." _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer." @classmethod @@ -197,7 +199,7 @@ def setUpClass(cls): # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -304,7 +306,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots @@ -313,11 +315,11 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) - vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) - vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) - vol_snap_3 = 
self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg) + vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg) self._delete_and_test_snapshot(vol_snap_2) @@ -325,9 +327,9 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): self._delete_and_test_snapshot(vol_snap_3) - vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) - vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) virtual_machine.delete(self.apiClient, True) @@ -336,7 +338,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) self._delete_and_test_snapshot(vol_snap_1) @@ -345,7 +347,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) self._delete_and_test_snapshot(vol_snap_2) @@ -381,20 +383,20 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) - vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) - vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg) + vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg) - services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": 
"CentOS 5.6 (64-bit)", "ispublic": "true"} + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services) @@ -429,16 +431,16 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume.id, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) - services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, account=self.account.name, domainid=self.domain.id) @@ -449,9 +451,9 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) @@ -469,9 +471,9 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) volume_created_from_snapshot = virtual_machine_2.attach_volume( self.apiClient, @@ -489,8 +491,8 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + 
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) virtual_machine_2.delete(self.apiClient, True) @@ -499,7 +501,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) list_volumes_response = list_volumes( self.apiClient, @@ -555,7 +557,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots @@ -626,7 +628,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots @@ -645,7 +647,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): vol_snap_3 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 3, primary_storage_db_id, sf_volume_size, sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) - services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services) @@ -680,7 +682,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots @@ -693,7 +695,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): vol_snap_a = self._create_and_test_snapshot_2(vm_2_root_volume.id, sf_volume_id_2, sf_volume_id + 5, primary_storage_db_id, sf_volume_size_2, sf_account_id, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) - services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, account=self.account.name, domainid=self.domain.id) @@ -704,9 +706,9 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_2 = 
self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) @@ -724,8 +726,8 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) volume_created_from_snapshot = virtual_machine_2.attach_volume( self.apiClient, @@ -794,7 +796,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): virtual_machine.stop(self.apiClient, True) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) sf_volume_id = sf_volume.volume_id sf_volume_size = sf_volume.total_size @@ -807,7 +809,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) - services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_1.id, services, account=self.account.name, domainid=self.domain.id) @@ -818,7 +820,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) sf_util.check_list(sf_volume_2.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) @@ -914,7 +916,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots @@ -934,11 +936,11 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): sf_util.set_supports_resign(True, self.dbConnection) - vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) - vol_snap_b = 
self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + vol_snap_b = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) - services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} template_1 = Template.create_from_snapshot(self.apiClient, vol_snap_1, services) @@ -973,14 +975,14 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id) @@ -994,7 +996,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): volume_created_from_snapshot_1 ) - services = {"displaytext": "Template-A", "name": "Template-A-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + services = {"displaytext": "Template-A", "name": "Template-A-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} template_a = Template.create_from_snapshot(self.apiClient, vol_snap_a, services) @@ -1029,14 +1031,14 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_3_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_3.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id) @@ -1147,7 +1149,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots @@ -1158,11 +1160,11 @@ def 
test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume) - vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) vol_snap_3_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume) - vol_snap_4 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + vol_snap_4 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) self._delete_and_test_archive_snapshot(vol_snap_3_archive) @@ -1174,7 +1176,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume) - vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) virtual_machine.delete(self.apiClient, True) @@ -1183,7 +1185,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) self._delete_and_test_archive_snapshot(vol_snap_1_archive) @@ -1192,7 +1194,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) self._delete_and_test_snapshot(vol_snap_2) @@ -1228,22 +1230,22 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) vol_snap_2_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume) - vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + vol_snap_3 = 
self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) vol_snap_4_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume) - services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} template = Template.create_from_snapshot(self.apiClient, vol_snap_2_archive, services) @@ -1278,7 +1280,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots @@ -1298,9 +1300,9 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) @@ -1318,9 +1320,9 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) volume_created_from_snapshot = virtual_machine_2.attach_volume( self.apiClient, @@ -1340,8 +1342,8 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) - sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) virtual_machine_2.delete(self.apiClient, True) @@ -1350,7 +1352,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, 
volume_created_from_snapshot_name) + sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) list_volumes_response = list_volumes( self.apiClient, @@ -1370,9 +1372,8 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) - @attr(hypervisor='KVM') def test_05_create_volume_snapshot_using_sf_snapshot_and_revert_volume_to_snapshot(self): - if TestData.hypervisor_type != TestData.kvm: + if TestData.hypervisor_type != TestData.vmWare and TestData.hypervisor_type != TestData.kvm: return virtual_machine = VirtualMachine.create( @@ -1404,7 +1405,7 @@ def test_05_create_volume_snapshot_using_sf_snapshot_and_revert_volume_to_snapsh sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) - sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots @@ -1413,9 +1414,56 @@ def test_05_create_volume_snapshot_using_sf_snapshot_and_revert_volume_to_snapsh primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) - vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + virtual_machine.stop(self.apiClient, False) + + if TestData.hypervisor_type == TestData.vmWare: + try: + Volume.revertToSnapshot(self.apiClient, vol_snap_1.id) + + self.assertTrue(False, "An exception should have been thrown when trying to revert a volume to a snapshot and the volume is a root disk on VMware.") + except: + pass + else: + Volume.revertToSnapshot(self.apiClient, vol_snap_1.id) + + virtual_machine.start(self.apiClient) + + try: + Volume.revertToSnapshot(self.apiClient, vol_snap_1.id) + + self.assertTrue(False, "An exception should have been thrown when trying to revert a volume to a snapshot and the volume is attached to a running VM.") + except: + pass + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id) + + volume_created_from_snapshot_name = volume_created_from_snapshot.name + + # Get volume information from SolidFire cluster + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) + + sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name) + + self._delete_and_test_snapshot(vol_snap_2) + + self._delete_and_test_snapshot(vol_snap_1) + + vol_snap_1 = self._create_and_test_snapshot(volume_created_from_snapshot, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(volume_created_from_snapshot, sf_volume_2, 
primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) - vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + volume_created_from_snapshot = virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) virtual_machine.stop(self.apiClient, False) @@ -1436,28 +1484,34 @@ def test_05_create_volume_snapshot_using_sf_snapshot_and_revert_volume_to_snapsh virtual_machine.delete(self.apiClient, True) - def _check_list_not_empty(self, in_list): - self.assertEqual( - isinstance(in_list, list), - True, - "'in_list' is not a list." - ) + volume_created_from_snapshot = Volume(volume_created_from_snapshot.__dict__) - self.assertGreater( - len(in_list), - 0, - "The size of 'in_list' must be greater than zero." - ) + volume_created_from_snapshot.delete(self.apiClient) # used when SolidFire snapshots are being used for CloudStack volume snapshots - def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size): - sf_util.check_list(sf_snapshot_details, 5, self, TestSnapshots._should_be_five_items_in_list_err_msg) + def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, volume, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size): + if TestData.hypervisor_type == TestData.vmWare: + expected_num_details = 7 + + err_msg = TestSnapshots._should_be_seven_items_in_list_err_msg + else: + expected_num_details = 6 + + err_msg = TestSnapshots._should_be_six_items_in_list_err_msg + + volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, volume) + + sf_util.check_list(sf_snapshot_details, expected_num_details, self, err_msg) self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "takeSnapshot", "true") self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id) self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "snapshotId", sf_snapshot_id) self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id) self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfVolumeSize", sf_volume_size) + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "originalCloudStackVolumeId", volume_id) + + if TestData.hypervisor_type == TestData.vmWare: + self._check_snapshot_detail_with(str.endswith, sf_snapshot_details, cs_snapshot_id, "vmdk", ".vmdk") # used when SolidFire volumes are being used for CloudStack volume snapshots def _check_snapshot_details_2(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, storage_pool_id, sf_volume_size): @@ -1466,7 +1520,7 @@ def _check_snapshot_details_2(self, sf_snapshot_details, cs_snapshot_id, sf_volu self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id) self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id) self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfVolumeSize", sf_volume_size) - self._check_snapshot_detail_starts_with(sf_snapshot_details, cs_snapshot_id, "iqn", "/iqn.") + self._check_snapshot_detail_with(str.startswith, sf_snapshot_details, cs_snapshot_id, "iqn", "/iqn.") self._check_snapshot_detail_size(sf_snapshot_details, cs_snapshot_id, "path", 36) def _check_snapshot_detail(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, snapshot_detail_value): @@ -1480,16 +1534,16 @@ def 
_check_snapshot_detail(self, sf_snapshot_details_list, cs_snapshot_id, snaps raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and value '" + str(snapshot_detail_value) + "'.") - def _check_snapshot_detail_starts_with(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, starts_with): + def _check_snapshot_detail_with(self, with_f, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, with_str): for sf_snapshot_detail_dict in sf_snapshot_details_list: if sf_snapshot_detail_dict["volumeSnapshotId"] != cs_snapshot_id: raise Exception("This snapshot detail does not apply to the expected CloudStack volume snapshot.") if sf_snapshot_detail_dict["snapshotDetailsName"] == snapshot_detail_key: - if sf_snapshot_detail_dict["snapshotDetailsValue"].startswith(starts_with): + if with_f(str(sf_snapshot_detail_dict["snapshotDetailsValue"]), with_str): return - raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'starts with' value '" + starts_with + "'.") + raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'starts with/ends with' value '" + with_str + "'.") def _check_snapshot_detail_size(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, length): for sf_snapshot_detail_dict in sf_snapshot_details_list: @@ -1503,7 +1557,7 @@ def _check_snapshot_detail_size(self, sf_snapshot_details_list, cs_snapshot_id, raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'length' value '" + str(length) + "'.") def _most_recent_sf_snapshot(self, sf_snapshots): - self._check_list_not_empty(sf_snapshots) + sf_util.check_list_not_empty(self, sf_snapshots) most_recent_id = 0 sf_snapshot_to_return = None @@ -1520,40 +1574,13 @@ def _most_recent_sf_snapshot(self, sf_snapshots): return sf_snapshot_to_return def _get_cs_volume_snapshot_db_id(self, vol_snap): - return self._get_db_id("snapshots", vol_snap) + return sf_util.get_db_id(self.dbConnection, "snapshots", vol_snap) def _get_cs_storage_pool_db_id(self, storage_pool): - return self._get_db_id("storage_pool", storage_pool) - - def _get_db_id(self, table, db_obj): - sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'" - - # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench - sql_result = self.dbConnection.execute(sql_query) - - return sql_result[0][0] - - def _get_sf_volume_by_name(self, sf_volumes, sf_volume_name): - self._check_list_not_empty(sf_volumes) - - sf_volume = None - - for volume in sf_volumes: - if volume.name == sf_volume_name: - sf_volume = volume - - break - - self.assertNotEqual( - sf_volume, - None, - "The SolidFire volume could not be found in the expected account." 
- ) - - return sf_volume + return sf_util.get_db_id(self.dbConnection, "storage_pool", storage_pool) def _get_sf_volume_by_id(self, sf_volumes, sf_volume_id): - self._check_list_not_empty(sf_volumes) + sf_util.check_list_not_empty(self, sf_volumes) sf_volume = None @@ -1596,10 +1623,10 @@ def _check_snapshot_details_do_not_exist(self, vol_snap_db_id): ) # used when SolidFire snapshots are being used for CloudStack volume snapshots - def _create_and_test_snapshot(self, volume_id_for_snapshot, sf_volume, primary_storage_db_id, expected_num_snapshots, snapshot_err_msg): + def _create_and_test_snapshot(self, volume_for_snapshot, sf_volume, primary_storage_db_id, expected_num_snapshots, snapshot_err_msg): vol_snap = Snapshot.create( self.apiClient, - volume_id=volume_id_for_snapshot + volume_id=volume_for_snapshot.id ) self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP) @@ -1619,7 +1646,7 @@ def _create_and_test_snapshot(self, volume_id_for_snapshot, sf_volume, primary_s vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) - self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot.snapshot_id, primary_storage_db_id, sf_volume.total_size) + self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, volume_for_snapshot, sf_volume_id, sf_snapshot.snapshot_id, primary_storage_db_id, sf_volume.total_size) return vol_snap diff --git a/test/integration/plugins/solidfire/TestUploadDownload.py b/test/integration/plugins/solidfire/TestUploadDownload.py new file mode 100644 index 00000000000..d81600e9128 --- /dev/null +++ b/test/integration/plugins/solidfire/TestUploadDownload.py @@ -0,0 +1,516 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import random +import SignedAPICall +import urllib2 + +from solidfire.factory import ElementFactory + +from util import sf_util + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. +from marvin.lib.utils import cleanup_resources, wait_until + +# Prerequisites: +# Only one zone +# Only one pod +# Only one cluster + +# Note: +# If you do have more than one cluster, you might need to change this line: cls.cluster = list_clusters(cls.apiClient)[0] +# Set extract.url.cleanup.interval to 240. 
+# Set extract.url.expiration.interval to 120. + + +class TestData: + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterId" + computeOffering = "computeoffering" + diskOffering = "diskoffering" + domainId = "domainId" + hypervisor = "hypervisor" + kvm = "kvm" + login = "login" + mvip = "mvip" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + tags = "tags" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + volume_1 = "volume_1" + xenServer = "xenserver" + zoneId = "zoneId" + + # modify to control which hypervisor type to test + hypervisor_type = kvm + volume_url = "http://10.117.40.114/tiny-centos-63.qcow2" + file_type = "QCOW2" + properties_file = "volume.properties" + install_path_index = 14 + secondary_storage_server = "10.117.40.114" + secondary_storage_server_root = "/export/secondary/" + secondary_storage_server_username = "cloudstack" + secondary_storage_server_password = "solidfire" + # "HTTP_DOWNLOAD" and "FTP_UPLOAD" are valid for download_mode, but they lead to the same behavior + download_mode = "HTTP_DOWNLOAD" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "10.117.40.120", + TestData.username: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://10.117.40.120:443" + }, + TestData.account: { + "email": "test@test.com", + "firstname": "John", + "lastname": "Doe", + TestData.username: "test", + TestData.password: "test" + }, + TestData.user: { + "email": "user@test.com", + "firstname": "Jane", + "lastname": "Doe", + TestData.username: "testuser", + TestData.password: "password" + }, + TestData.primaryStorage: { + "name": "SolidFire-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "MVIP=10.117.40.120;SVIP=10.117.41.120;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any" + }, + TestData.virtualMachine: { + "name": "TestVM", + "displayname": "Test VM" + }, + TestData.computeOffering: { + "name": "SF_CO_1", + "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + "miniops": "10000", + "maxiops": "15000", + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag + }, + TestData.diskOffering: { + "name": "SF_DO_1", + "displaytext": "SF_DO_1 Custom Size", + "customizediops": False, + "miniops": 5000, + "maxiops": 10000, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + TestData.volume_1: { + "diskname": "testvolume", + }, + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "10.117.40.114" + } + + +class TestUploadDownload(cloudstackTestCase): + errorText = "should be either detached or the VM should be in stopped state" + assertText = "The length of the response for the 'volume_store_ref' result should be equal to 1." + assertText2 = "The length of the response for the 'volume_store_ref' result should be equal to 0." 
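Throughout the snapshot tests above, the private helpers _get_sf_volume_by_name and _check_list_not_empty are replaced by shared equivalents in sf_util. The diff only shows the removals, so the following is a minimal sketch of what the shared helpers presumably look like, reconstructed from the removed methods and the new call sites (sf_util.get_sf_volume_by_name(self, sf_volumes, name)); the actual sf_util module may differ:

def check_list_not_empty(obj_assert, in_list):
    # Mirrors the removed TestSnapshots._check_list_not_empty, with the
    # asserting test case passed in explicitly instead of being self.
    obj_assert.assertEqual(isinstance(in_list, list), True, "'in_list' is not a list.")
    obj_assert.assertGreater(len(in_list), 0, "The size of 'in_list' must be greater than zero.")

def get_sf_volume_by_name(obj_assert, sf_volumes, sf_volume_name):
    # Mirrors the removed TestSnapshots._get_sf_volume_by_name: scan the
    # SolidFire volume list for a matching name and fail the test if absent.
    check_list_not_empty(obj_assert, sf_volumes)

    sf_volume = None

    for volume in sf_volumes:
        if volume.name == sf_volume_name:
            sf_volume = volume
            break

    obj_assert.assertNotEqual(sf_volume, None, "The SolidFire volume could not be found in the expected account.")

    return sf_volume

Hoisting these into sf_util lets TestSnapshots, TestUploadDownload, and TestVolumes share one implementation instead of each test class carrying its own private copy.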
+ + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestUploadDownload, cls).getClsTestClient() + + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() + cls.dbConnection = testclient.getDbConnection() + + cls.testdata = TestData().testdata + + # Set up SolidFire connection + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + cls.cluster = list_clusters(cls.apiClient)[1] + cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata[TestData.account], + admin=1 + ) + + # Set up connection to make customized API calls + user = User.create( + cls.apiClient, + cls.testdata[TestData.user], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + primarystorage = cls.testdata[TestData.primaryStorage] + + cls.primary_storage = StoragePool.create( + cls.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls.disk_offering = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOffering], + custom=True + ) + + # Create VM and volume for tests + cls.virtual_machine = VirtualMachine.create( + cls.apiClient, + cls.testdata[TestData.virtualMachine], + accountid=cls.account.name, + zoneid=cls.zone.id, + serviceofferingid=compute_offering.id, + templateid=cls.template.id, + domainid=cls.domain.id, + startvm=True + ) + + cls._cleanup = [ + compute_offering, + cls.disk_offering, + user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cls.virtual_machine.delete(cls.apiClient, True) + + cleanup_resources(cls.apiClient, cls._cleanup) + + cls.primary_storage.delete(cls.apiClient) + + sf_util.purge_solidfire_volumes(cls.sfe) + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiClient, self.cleanup) + except Exception as e: + logging.debug("Exception in tearDown(self): %s" % e) + + def test_01_upload_and_download_snapshot(self): + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=self.virtual_machine.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, "There should only be one volume in this list.") + + vm_root_volume = list_volumes_response[0] + + ### Perform tests related to uploading a QCOW2 file to secondary storage and then moving it to managed storage + + volume_name = "Volume-A" + services = {"format": TestData.file_type, "diskname": volume_name} + + 
uploaded_volume = Volume.upload(self.apiClient, services, self.zone.id, + account=self.account.name, domainid=self.account.domainid, + url=TestData.volume_url, diskofferingid=self.disk_offering.id) + + self._wait_for_volume_state(uploaded_volume.id, "Uploaded") + + uploaded_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, uploaded_volume) + + result = self._get_volume_store_ref_row(uploaded_volume_id) + + self.assertEqual( + len(result), + 1, + TestUploadDownload.assertText + ) + + install_path = self._get_install_path(result[0][TestData.install_path_index]) + + self._verify_uploaded_volume_present(install_path) + + uploaded_volume = self.virtual_machine.attach_volume( + self.apiClient, + uploaded_volume + ) + + uploaded_volume = sf_util.check_and_get_cs_volume(self, uploaded_volume.id, volume_name, self) + + sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, "The SolidFire account ID should be a non-zero integer.") + + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) + + self.assertNotEqual( + len(sf_volumes), + 0, + "The length of the response for the SolidFire-volume query should not be zero." + ) + + sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, uploaded_volume.name, self) + + sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, uploaded_volume, self) + + sf_util.check_size_and_iops(sf_volume, uploaded_volume, sf_volume_size, self) + + sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) + + sf_util.check_vag(sf_volume, sf_vag_id, self) + + result = self._get_volume_store_ref_row(uploaded_volume_id) + + self.assertEqual( + len(result), + 0, + TestUploadDownload.assertText2 + ) + + self._verify_uploaded_volume_not_present(install_path) + + ### Perform tests related to extracting the contents of a volume on managed storage to a QCOW2 file + ### and downloading the file + + try: + # for data disk + Volume.extract(self.apiClient, uploaded_volume.id, self.zone.id, TestData.download_mode) + + raise Exception("The volume extraction (for the data disk) did not fail (as expected).") + except Exception as e: + if TestUploadDownload.errorText in str(e): + pass + else: + raise + + vm_root_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, vm_root_volume) + + try: + # for root disk + Volume.extract(self.apiClient, vm_root_volume.id, self.zone.id, TestData.download_mode) + + raise Exception("The volume extraction (for the root disk) did not fail (as expected).") + except Exception as e: + if TestUploadDownload.errorText in str(e): + pass + else: + raise + + self.virtual_machine.stop(self.apiClient) + + self._extract_volume_and_verify(uploaded_volume_id, "Unable to locate the extracted file for the data disk (attached)") + + result = self._get_volume_store_ref_row(vm_root_volume_id) + + self.assertEqual( + len(result), + 0, + TestUploadDownload.assertText2 + ) + + self._extract_volume_and_verify(vm_root_volume_id, "Unable to locate the extracted file for the root disk") + + uploaded_volume = self.virtual_machine.detach_volume( + self.apiClient, + uploaded_volume + ) + + self._extract_volume_and_verify(uploaded_volume_id, "Unable to locate the extracted file for the data disk (detached)") + + uploaded_volume = Volume(uploaded_volume.__dict__) + + uploaded_volume.delete(self.apiClient) + + # self.virtual_machine.start(self.apiClient) + + def _verify_uploaded_volume_present(self, install_path, verify_properties_file=True): + result, result2 = 
self._get_results(install_path) + + self.assertFalse(result is None or len(result.strip()) == 0, "Unable to find the QCOW2 file") + + if verify_properties_file: + self.assertFalse(result2 is None or len(result2.strip()) == 0, "Unable to find the " + TestData.properties_file + " file") + + def _verify_uploaded_volume_not_present(self, install_path): + result, result2 = self._get_results(install_path) + + self.assertTrue(result is None or len(result.strip()) == 0, "QCOW2 file present, but should not be") + self.assertTrue(result2 is None or len(result2.strip()) == 0, TestData.properties_file + " file present, but should not be") + + def _get_results(self, install_path): + ssh_connection = sf_util.get_ssh_connection(TestData.secondary_storage_server, + TestData.secondary_storage_server_username, + TestData.secondary_storage_server_password) + + stdout = ssh_connection.exec_command("ls -l " + TestData.secondary_storage_server_root + + install_path + " | grep qcow2")[1] + + result = stdout.read() + + stdout = ssh_connection.exec_command("ls -l " + TestData.secondary_storage_server_root + + install_path + " | grep " + TestData.properties_file)[1] + + result2 = stdout.read() + + ssh_connection.close() + + return result, result2 + + def _get_install_path(self, install_path): + index = install_path.rfind('/') + + return install_path[:index] + + def _get_volume_store_ref_row(self, volume_id): + sql_query = "Select * From volume_store_ref Where volume_id = '" + str(volume_id) + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + return sql_result + + def _extract_volume_and_verify(self, volume_id, error_msg): + extract_result = Volume.extract(self.apiClient, volume_id, self.zone.id, TestData.download_mode) + + result = self._get_volume_store_ref_row(volume_id) + + self.assertEqual( + len(result), + 1, + TestUploadDownload.assertText + ) + + install_path = self._get_install_path(result[0][TestData.install_path_index]) + + self._verify_uploaded_volume_present(install_path, False) + + url_response = urllib2.urlopen(extract_result.url) + + if url_response.code != 200: + raise Exception(error_msg) + + self._wait_for_removal_of_extracted_volume(volume_id, extract_result.url) + + def _wait_for_removal_of_extracted_volume(self, volume_id, extract_result_url): + retry_interval = 60 + num_tries = 10 + + wait_result, return_val = wait_until(retry_interval, num_tries, self._check_removal_of_extracted_volume_state, volume_id, extract_result_url) + + if not wait_result: + raise Exception(return_val) + + def _check_removal_of_extracted_volume_state(self, volume_id, extract_result_url): + result = self._get_volume_store_ref_row(volume_id) + + if len(result) == 0: + try: + urllib2.urlopen(extract_result_url) + except Exception as e: + if "404" in str(e): + return True, "" + + return False, "The extracted volume has not been removed." 
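Both _wait_for_removal_of_extracted_volume above and _wait_for_volume_state below lean on the same polling contract: marvin.lib.utils.wait_until(retry_interval, num_tries, check_fn, *args) repeatedly invokes a check function that returns a (done, message) pair and gives up after num_tries attempts. A minimal sketch of that contract, inferred from these call sites only (the real Marvin implementation may differ):

import time

def wait_until(retry_interval, num_tries, check_fn, *args):
    # Poll check_fn every retry_interval seconds, at most num_tries times.
    # check_fn returns (done, message); on success the message is returned
    # alongside True, otherwise the last failure message comes back with False.
    return_val = ""
    for _ in range(num_tries):
        done, return_val = check_fn(*args)
        if done:
            return True, return_val
        time.sleep(retry_interval)
    return False, return_val

With retry_interval=60 and num_tries=10, _wait_for_removal_of_extracted_volume therefore allows up to ten minutes for the volume_store_ref row to disappear and the extract URL to start returning 404 before the test fails.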
+ + def _wait_for_volume_state(self, volume_id, volume_state): + retry_interval = 30 + num_tries = 10 + + wait_result, return_val = wait_until(retry_interval, num_tries, TestUploadDownload._check_volume_state, self.apiClient, volume_id, volume_state) + + if not wait_result: + raise Exception(return_val) + + @staticmethod + def _check_volume_state(api_client, volume_id, volume_state): + volume = list_volumes( + api_client, + id=volume_id, + listall=True + )[0] + + if str(volume.state).lower() == volume_state.lower(): + return True, "" + + return False, "The volume is not in the '" + volume_state + "' state. State = " + str(volume.state) diff --git a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py index d563e5eb273..93ab3b6ff61 100644 --- a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py +++ b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py @@ -84,11 +84,11 @@ class TestData(): def __init__(self): self.testdata = { TestData.solidFire: { - TestData.mvip: "192.168.139.112", + TestData.mvip: "10.117.40.120", TestData.username: "admin", TestData.password: "admin", TestData.port: 443, - TestData.url: "https://192.168.139.112:443" + TestData.url: "https://10.117.40.120:443" }, TestData.xenServer: { TestData.username: "root", @@ -118,7 +118,7 @@ def __init__(self): TestData.primaryStorage: { TestData.name: "SolidFire-%d" % random.randint(0, 100), TestData.scope: "ZONE", - TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", @@ -132,7 +132,7 @@ def __init__(self): TestData.primaryStorage2: { TestData.name: "SolidFireShared-%d" % random.randint(0, 100), TestData.scope: "CLUSTER", - TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "minIops=5000;maxIops=50000;burstIops=75000", TestData.provider: "SolidFireShared", @@ -211,7 +211,7 @@ def __init__(self): TestData.clusterId1: 1, TestData.clusterId2: 2, TestData.domainId: 1, - TestData.url: "192.168.129.50" + TestData.url: "10.117.40.114" } diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py index 880e6fd8e49..45c42429843 100644 --- a/test/integration/plugins/solidfire/TestVMSnapshots.py +++ b/test/integration/plugins/solidfire/TestVMSnapshots.py @@ -18,6 +18,7 @@ import logging import random import SignedAPICall +import time import XenAPI from solidfire.factory import ElementFactory @@ -71,14 +72,17 @@ class TestData: xenServer = "xenserver" zoneId = "zoneId" + # modify to control which hypervisor type to test + hypervisor_type = xenServer + def __init__(self): self.testdata = { TestData.solidFire: { - TestData.mvip: "192.168.139.112", + TestData.mvip: "10.117.40.120", TestData.username: "admin", TestData.password: "admin", TestData.port: 443, - TestData.url: "https://192.168.139.112:443" + TestData.url: "https://10.117.40.120:443" }, TestData.xenServer: { TestData.username: "root", @@ -101,7 +105,7 @@ def __init__(self): TestData.primaryStorage: { "name": "SolidFire-%d" % random.randint(0, 100), TestData.scope: "ZONE", - "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "url": 
"MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", @@ -145,7 +149,7 @@ def __init__(self): TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, - TestData.url: "192.168.129.50" + TestData.url: "10.117.40.114" } @@ -190,7 +194,7 @@ def setUpClass(cls): # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) - template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -252,7 +256,6 @@ def setUpClass(cls): ) cls._cleanup = [ - cls.virtual_machine, compute_offering, cls.disk_offering, user, @@ -262,6 +265,10 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): try: + time.sleep(60) + + cls.virtual_machine.delete(cls.apiClient, True) + cleanup_resources(cls.apiClient, cls._cleanup) cls.primary_storage.delete(cls.apiClient) diff --git a/test/integration/plugins/solidfire/TestVolumes.py b/test/integration/plugins/solidfire/TestVolumes.py index add5ed165e3..9d3ab2f43ee 100644 --- a/test/integration/plugins/solidfire/TestVolumes.py +++ b/test/integration/plugins/solidfire/TestVolumes.py @@ -31,11 +31,11 @@ # Import Integration Libraries # base - contains all resources as entities and defines create, delete, list operations on them -from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume +from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume # common - commonly used methods for all tests are listed here from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \ - list_volumes, list_hosts + list_volumes # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources @@ -49,6 +49,9 @@ # Change the "hypervisor_type" variable to control which hypervisor type to test. # If using XenServer, verify the "xen_server_hostname" variable is correct. # If using XenServer, change the "supports_cloning" variable to True or False as desired. 
+# +# Note: +# If you do have more than one cluster, you might need to change this line: cls.cluster = list_clusters(cls.apiClient)[0] class TestData(): @@ -65,6 +68,7 @@ class TestData(): kvm = "kvm" login = "login" mvip = "mvip" + one_GB_in_bytes = 1073741824 password = "password" port = "port" primaryStorage = "primarystorage" @@ -87,17 +91,17 @@ class TestData(): zoneId = "zoneId" # modify to control which hypervisor type to test - hypervisor_type = xenServer + hypervisor_type = kvm xen_server_hostname = "XenServer-6.5-1" def __init__(self): self.testdata = { TestData.solidFire: { - TestData.mvip: "192.168.139.112", + TestData.mvip: "10.117.40.120", TestData.username: "admin", TestData.password: "admin", TestData.port: 443, - TestData.url: "https://192.168.139.112:443" + TestData.url: "https://10.117.40.120:443" }, TestData.kvm: { TestData.username: "root", @@ -131,7 +135,7 @@ def __init__(self): TestData.primaryStorage: { "name": "SolidFire-%d" % random.randint(0, 100), TestData.scope: "ZONE", - "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "url": "MVIP=10.117.40.120;SVIP=10.117.41.120;" + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", @@ -182,7 +186,7 @@ def __init__(self): TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, - TestData.url: "192.168.129.50" + TestData.url: "10.117.40.114" } @@ -190,6 +194,7 @@ class TestVolumes(cloudstackTestCase): _should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list." _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list." _should_only_be_one_host_in_list_err_msg = "There should only be one host in this list." + _should_only_be_two_volumes_in_list_err_msg = "There should only be two volumes in this list." _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer." _volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer." _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match." @@ -197,6 +202,16 @@ class TestVolumes(cloudstackTestCase): _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state." _volume_response_should_not_be_zero_err_msg = "The length of the response for the SolidFire-volume query should not be zero." _volume_should_not_be_in_a_vag = "The volume should not be in a volume access group." + _volume_size_not_an_int = "'volume_size_in_GB' is not of type 'int'" + _only_data_volumes_err_msg = "Only data volumes can be resized via a new disk offering." + _to_change_volume_size_err_msg = "To change a volume's size without providing a new disk offering, its current " \ + "disk offering must be customizable or it must be a root volume (if providing a disk offering, make sure it is " \ + "different from the current disk offering)." + _min_iops_err_msg = "The current disk offering does not support customization of the 'Min IOPS' parameter." + _this_kind_of_disk_err_msg = "This kind of KVM disk cannot be resized while it is connected to a VM that's not in the Stopped state." + _template_creation_did_not_fail_err_msg = "The template creation did not fail (as expected)." + _volume_resize_did_not_fail_err_msg = "The volume resize did not fail (as expected)." 
+ _volume_attached_to_non_stopped_vm_err_msg = "volume is attached to a non-stopped VM" @classmethod def setUpClass(cls): @@ -221,7 +236,7 @@ def setUpClass(cls): # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -366,14 +381,14 @@ def test_01_attach_new_volume_to_stopped_VM(self): self.cleanup.append(new_volume) - self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, new_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) new_volume = self.virtual_machine.attach_volume( self.apiClient, new_volume ) - newvolume = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + newvolume = sf_util.check_and_get_cs_volume(self, new_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) TestVolumes._start_vm(self.virtual_machine) @@ -426,7 +441,7 @@ def test_02_attach_detach_attach_volume(self): sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) - self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) ####################################### ####################################### @@ -441,7 +456,7 @@ def test_02_attach_detach_attach_volume(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -486,7 +501,7 @@ def test_02_attach_detach_attach_volume(self): self.attached = False - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -527,7 +542,7 @@ def test_02_attach_detach_attach_volume(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -560,7 +575,7 @@ def test_03_attached_volume_reboot_VM(self): sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) - self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) ####################################### ####################################### @@ -575,7 +590,7 @@ def test_03_attached_volume_reboot_VM(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, 
self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -614,7 +629,7 @@ def test_03_attached_volume_reboot_VM(self): ####################################### TestVolumes._reboot_vm(self.virtual_machine) - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -643,7 +658,7 @@ def test_04_detach_volume_reboot(self): sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) - self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) ####################################### ####################################### @@ -658,7 +673,7 @@ def test_04_detach_volume_reboot(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -703,7 +718,7 @@ def test_04_detach_volume_reboot(self): self.attached = False - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -739,7 +754,7 @@ def test_04_detach_volume_reboot(self): self.virtual_machine.reboot(self.apiClient) - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -764,7 +779,7 @@ def test_05_detach_vol_stopped_VM_start(self): sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) - self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) ####################################### ####################################### @@ -779,7 +794,7 @@ def test_05_detach_vol_stopped_VM_start(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -828,7 +843,7 @@ def test_05_detach_vol_stopped_VM_start(self): self.attached = False - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -864,7 +879,7 @@ def test_05_detach_vol_stopped_VM_start(self): TestVolumes._start_vm(self.virtual_machine) - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, 
self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -889,7 +904,7 @@ def test_06_attach_volume_to_stopped_VM(self): sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) - self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) ####################################### ####################################### @@ -904,7 +919,7 @@ def test_06_attach_volume_to_stopped_VM(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -941,7 +956,7 @@ def test_06_attach_volume_to_stopped_VM(self): TestVolumes._start_vm(self.virtual_machine) - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -1002,7 +1017,7 @@ def test_07_destroy_expunge_VM_with_volume(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(test_virtual_machine.id) @@ -1050,7 +1065,7 @@ def test_07_destroy_expunge_VM_with_volume(self): self.attached = False - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) self.assertEqual( vol.virtualmachineid, @@ -1111,14 +1126,14 @@ def test_08_delete_volume_was_attached(self): volume_to_delete_later = new_volume - self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, new_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) new_volume = self.virtual_machine.attach_volume( self.apiClient, new_volume ) - vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, new_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -1165,7 +1180,7 @@ def test_08_delete_volume_was_attached(self): new_volume ) - vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, new_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -1253,7 +1268,7 @@ def test_09_attach_volumes_multiple_accounts(self): diskofferingid=self.disk_offering.id ) - self._check_and_get_cs_volume(test_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, test_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) ####################################### ####################################### @@ -1268,7 +1283,7 @@ def test_09_attach_volumes_multiple_accounts(self): self.attached = True - vol = 
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) vm = self._get_vm(self.virtual_machine.id) @@ -1289,7 +1304,7 @@ def test_09_attach_volumes_multiple_accounts(self): test_volume ) - test_vol = self._check_and_get_cs_volume(test_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + test_vol = sf_util.check_and_get_cs_volume(self, test_volume.id, self.testdata[TestData.volume_2][TestData.diskName], self) test_vm = self._get_vm(test_virtual_machine.id) @@ -1359,7 +1374,7 @@ def test_10_attach_more_than_one_disk_to_VM(self): self.cleanup.append(volume_2) - self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName]) + sf_util.check_and_get_cs_volume(self, volume_2.id, self.testdata[TestData.volume_2][TestData.diskName], self) ####################################### ####################################### @@ -1374,14 +1389,14 @@ def test_10_attach_more_than_one_disk_to_VM(self): self.attached = True - vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + vol = sf_util.check_and_get_cs_volume(self, self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], self) self.virtual_machine.attach_volume( self.apiClient, volume_2 ) - vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName]) + vol_2 = sf_util.check_and_get_cs_volume(self, volume_2.id, self.testdata[TestData.volume_2][TestData.diskName], self) sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg) @@ -1421,146 +1436,1244 @@ def test_10_attach_more_than_one_disk_to_VM(self): self.virtual_machine.detach_volume(self.apiClient, volume_2) - def _check_volume(self, volume, volume_name): - self.assertTrue( - volume.name.startswith(volume_name), - "The volume name is incorrect." + def test_11_template_from_volume(self): + if TestData.hypervisor_type != TestData.kvm: + return + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=self.virtual_machine.id, + listall=True ) - self.assertEqual( - volume.diskofferingid, - self.disk_offering.id, - "The disk offering is incorrect." 
+ sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} + + try: + Template.create_from_volume(self.apiClient, vm_1_root_volume, services) + + raise Exception(TestVolumes._template_creation_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._volume_attached_to_non_stopped_vm_err_msg not in e.errorMsg: + raise + + self.virtual_machine.stop(self.apiClient) + + template = Template.create_from_volume(self.apiClient, vm_1_root_volume, services) + + self.cleanup.append(template) + + vol_snap = Snapshot.create(self.apiClient, volume_id=vm_1_root_volume.id) + + self.cleanup.append(vol_snap) + + TestVolumes._start_vm(self.virtual_machine) + + self._create_vm_using_template_and_destroy_vm(template) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap.id, services, account=self.account.name, domainid=self.domain.id) + + self.cleanup.append(volume_created_from_snapshot) + + services = {"displaytext": "Template-2", "name": "Template-2-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} + + template = Template.create_from_volume(self.apiClient, volume_created_from_snapshot, services) + + self.cleanup.append(template) + + self._create_vm_using_template_and_destroy_vm(template) + + volume_created_from_snapshot = self.virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot ) - self.assertEqual( - volume.zoneid, - self.zone.id, - "The zone is incorrect." + services = {"displaytext": "Template-3", "name": "Template-3-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} + + try: + Template.create_from_volume(self.apiClient, volume_created_from_snapshot, services) + + raise Exception(TestVolumes._template_creation_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._volume_attached_to_non_stopped_vm_err_msg not in e.errorMsg: + raise + + self.virtual_machine.stop(self.apiClient) + + template = Template.create_from_volume(self.apiClient, volume_created_from_snapshot, services) + + self.cleanup.append(template) + + volume_created_from_snapshot = self.virtual_machine.detach_volume( + self.apiClient, + volume_created_from_snapshot ) - self.assertEqual( - volume.storagetype, - self.disk_offering.storagetype, - "The storage type is incorrect." 
+ TestVolumes._start_vm(self.virtual_machine) + + self._create_vm_using_template_and_destroy_vm(template) + + services = {"displaytext": "Template-4", "name": "Template-4-name", "ostypeid": self.template.ostypeid, "ispublic": "true"} + + template = Template.create_from_volume(self.apiClient, volume_created_from_snapshot, services) + + self.cleanup.append(template) + + self._create_vm_using_template_and_destroy_vm(template) + + def test_12_resize_volume_on_running_vm(self): + if TestData.hypervisor_type != TestData.kvm: + return + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine2], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True ) - def _check_and_get_cs_volume(self, volume_id, volume_name): + self.cleanup.append(test_virtual_machine) + list_volumes_response = list_volumes( self.apiClient, - id=volume_id + virtualmachineid=test_virtual_machine.id, + listall=True ) sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) - cs_volume = list_volumes_response[0] + test_vm_root_volume = Volume(list_volumes_response[0].__dict__) - self._check_volume(cs_volume, volume_name) + self._handle_root_volume_with_started_vm(test_vm_root_volume) - return cs_volume + volume_name = { + TestData.diskName: "test-volume-a", + } - def _verify_hsr(self, cs_volume_size_in_gb, hsr, sf_volume_size_in_bytes): - cs_volume_size_including_hsr_in_bytes = self._get_cs_volume_size_including_hsr_in_bytes(cs_volume_size_in_gb, hsr) + test_vm_data_volume = Volume.create( + self.apiClient, + volume_name, + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup.append(test_vm_data_volume) + + test_vm_data_volume = test_virtual_machine.attach_volume( + self.apiClient, + test_vm_data_volume + ) + + test_vm_data_volume = Volume(test_vm_data_volume.__dict__) + + self._handle_data_volume_with_started_vm(test_vm_data_volume) + + custom_disk_offering = { + "name": "SF_DO_A", + "displaytext": "SF_DO_A (Custom)", + "customized": True, + "customizediops": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + custom_disk_offering = DiskOffering.create( + self.apiClient, + custom_disk_offering, + custom=True + ) + + self.cleanup.append(custom_disk_offering) + + services = { + TestData.diskName: "test-volume-custom-a", + "customdisksize": 100, + "customminiops": 1000, + "custommaxiops": 2000, + "zoneid": self.testdata[TestData.zoneId] + } + + test_vm_data_volume = Volume.create_custom_disk( + self.apiClient, + services, + account=self.account.name, + domainid=self.domain.id, + diskofferingid=custom_disk_offering.id + ) + + self.cleanup.append(test_vm_data_volume) + + test_vm_data_volume = test_virtual_machine.attach_volume( + self.apiClient, + test_vm_data_volume + ) + + test_vm_data_volume = Volume(test_vm_data_volume.__dict__) + + self._handle_custom_data_volume_with_started_vm(test_vm_data_volume) + + def test_13_resize_volume_on_stopped_vm(self): + if TestData.hypervisor_type != TestData.kvm: + return + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine2], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + 
startvm=False + ) + + self.cleanup.append(test_virtual_machine) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=test_virtual_machine.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + test_vm_root_volume = Volume(list_volumes_response[0].__dict__) + + err_msg = "Check if SF volume was created in correct account" + + try: + # This should fail because there should not be an equivalent SolidFire volume in the cluster yet. + self._verify_volume(test_vm_root_volume) + + raise Exception("The volume verification did not fail (as expected).") + except Exception as e: + if err_msg not in str(e): + raise + + # Starting up the VM should create its root disk on the SolidFire cluster. + test_virtual_machine.start(self.apiClient) + + test_virtual_machine.stop(self.apiClient) + + self._handle_root_volume_with_stopped_vm(test_vm_root_volume) + + volume_name = { + TestData.diskName: "test-volume-a", + } + + test_vm_data_volume = Volume.create( + self.apiClient, + volume_name, + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup.append(test_vm_data_volume) + + test_vm_data_volume = test_virtual_machine.attach_volume( + self.apiClient, + test_vm_data_volume + ) + + test_vm_data_volume = Volume(test_vm_data_volume.__dict__) + + self._handle_data_volume_with_stopped_vm(test_vm_data_volume) + + custom_disk_offering = { + "name": "SF_DO_A", + "displaytext": "SF_DO_A (Custom)", + "customized": True, + "customizediops": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + custom_disk_offering = DiskOffering.create( + self.apiClient, + custom_disk_offering, + custom=True + ) + + self.cleanup.append(custom_disk_offering) + + services = { + TestData.diskName: "test-volume-custom-a", + "customdisksize": 100, + "customminiops": 1000, + "custommaxiops": 2000, + "zoneid": self.testdata[TestData.zoneId] + } + + test_vm_data_volume = Volume.create_custom_disk( + self.apiClient, + services, + account=self.account.name, + domainid=self.domain.id, + diskofferingid=custom_disk_offering.id + ) + + self.cleanup.append(test_vm_data_volume) + + test_vm_data_volume = test_virtual_machine.attach_volume( + self.apiClient, + test_vm_data_volume + ) + + test_vm_data_volume = Volume(test_vm_data_volume.__dict__) + + self._handle_custom_data_volume_with_stopped_vm(test_vm_data_volume) + + test_vm_data_volume = test_virtual_machine.detach_volume( + self.apiClient, + test_vm_data_volume + ) + + test_vm_data_volume = Volume(test_vm_data_volume.__dict__) + + self._handle_custom_data_volume_with_stopped_vm(test_vm_data_volume) + + def _handle_root_volume_with_started_vm(self, volume): + self._verify_volume(volume) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes self.assertTrue( 
- ); + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) - def _get_cs_volume_size_including_hsr_in_bytes(self, cs_volume_size_in_gb, hsr): - if TestData.hypervisor_type == TestData.kvm: - return self._get_bytes_from_gb(cs_volume_size_in_gb) + new_size = volume_size_in_GB + 10 + new_min_iops = volume.miniops + 100 + new_max_iops = volume.maxiops + 200 - lowest_hsr = 10 + sf_volume_size = self._get_sf_volume(volume.name).total_size - if hsr < lowest_hsr: - hsr = lowest_hsr; + try: + # Try to change the size and IOPS of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, size=new_size, miniops=new_min_iops, maxiops=new_max_iops) - return self._get_bytes_from_gb(cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100))) + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._this_kind_of_disk_err_msg not in str(e): + raise - def _get_bytes_from_gb(self, number_in_gb): - return number_in_gb * 1024 * 1024 * 1024 + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) - def _get_vm(self, vm_id): - list_vms_response = list_virtual_machines(self.apiClient, id=vm_id) + try: + # Try to change the size of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, size=new_size) - sf_util.check_list(list_vms_response, 1, self, TestVolumes._should_only_be_one_vm_in_list_err_msg) + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._this_kind_of_disk_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_A", + "displaytext": "SF_DO_A (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } - return list_vms_response[0] + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) - def _check_xen_sr(self, xen_sr_name, should_exist=True): - sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist) + try: + # Try to change the size and IOPS of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, diskofferingid=disk_offering.id) - def _get_active_sf_volumes(self, sf_account_id=None): - sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._only_data_volumes_err_msg not in str(e): + raise + + disk_offering.delete(self.apiClient) + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_B", + "displaytext": "SF_DO_B (Min IOPS = " + str(volume.miniops) + "; Max IOPS = " + str(volume.maxiops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": volume.miniops, + "maxiops": volume.maxiops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } - self.assertNotEqual( - len(sf_volumes), - 0, - TestVolumes._volume_response_should_not_be_zero_err_msg + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering ) - return sf_volumes + try: + # Try to change the size of a volume attached to a running VM (should fail). 
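+ # (A root volume cannot be resized via a new disk offering, so the except clause below expects TestVolumes._only_data_volumes_err_msg.)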
+ volume.resize(self.apiClient, diskofferingid=disk_offering.id) - def _get_template_cache_name(self): - if TestData.hypervisor_type == TestData.kvm: - return TestData.templateCacheNameKvm - elif TestData.hypervisor_type == TestData.xenServer: - return TestData.templateCacheNameXenServer + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._only_data_volumes_err_msg not in str(e): + raise + + disk_offering.delete(self.apiClient) + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_C", + "displaytext": "SF_DO_C (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": volume_size_in_GB, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } - self.assert_(False, "Invalid hypervisor type") + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) - def _get_modified_iscsi_name(self, sf_iscsi_name): - sf_iscsi_name = sf_iscsi_name.replace("/", "") + volume.resize(self.apiClient, diskofferingid=disk_offering.id) - return sf_iscsi_name[:-1] + disk_offering.delete(self.apiClient) - def _check_host_side(self, sf_iscsi_name, vm_hostid=None, should_exist=True): - if TestData.hypervisor_type == TestData.kvm: - self._check_kvm_host_side(self._get_modified_iscsi_name(sf_iscsi_name), vm_hostid, should_exist) - elif TestData.hypervisor_type == TestData.xenServer: - self._check_xen_sr(sf_iscsi_name, should_exist) + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) - def _check_kvm_host_side(self, sf_iscsi_name, vm_hostid, should_exist=True): - if vm_hostid is None: - list_hosts_response = list_hosts( - self.apiClient, - type="Routing" - ) - else: - list_hosts_response = list_hosts( - self.apiClient, - id=vm_hostid - ) + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) - sf_util.check_list(list_hosts_response, 1, self, TestVolumes._should_only_be_one_host_in_list_err_msg) + volume = Volume(list_volumes_response[0].__dict__) - kvm_login = self.testdata[TestData.kvm] + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) - for cs_host in list_hosts_response: - ssh_connection = sf_util.get_ssh_connection(cs_host.ipaddress, kvm_login[TestData.username], kvm_login[TestData.password]) + new_min_iops = new_min_iops + 10 + new_max_iops = new_max_iops + 20 - stdin, stdout, stderr = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + sf_iscsi_name) + volume.resize(self.apiClient, miniops=new_min_iops, maxiops=new_max_iops) - result = stdout.read() + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) - ssh_connection.close() + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) - if should_exist: - self.assertFalse(result is None, "Unable to locate 'by-path' field on the KVM host (None)") - self.assertFalse(len(result.strip()) <= len(sf_iscsi_name), "Unable to locate the 'by-path' field on the KVM host (Zero-length string)") - else: - self.assertTrue(result is None or len(result.strip()) == 0, "Found the 'by-path' field on the KVM host, but did not expect to") + volume = Volume(list_volumes_response[0].__dict__) - @classmethod - def _start_vm(cls, vm): - vm.start(cls.apiClient) + 
self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) - # Libvirt appears to have an issue detaching a volume from a VM while the VM is booting up. - # The XML sent to update the VM seems correct, but it doesn't appear to update the XML that describes the VM. - # For KVM, just give it 90 seconds to boot up. - if TestData.hypervisor_type == TestData.kvm: - time.sleep(90) + def _handle_root_volume_with_stopped_vm(self, volume): + self._verify_volume(volume) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + new_size = volume_size_in_GB + 10 + new_min_iops = volume.miniops + 100 + new_max_iops = volume.maxiops + 200 + + volume.resize(self.apiClient, size=new_size, miniops=new_min_iops, maxiops=new_max_iops) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + new_size = new_size + 10 + + volume.resize(self.apiClient, size=new_size) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + new_min_iops = new_min_iops + 100 + new_max_iops = new_max_iops + 200 + + volume.resize(self.apiClient, miniops=new_min_iops, maxiops=new_max_iops) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + new_size = new_size + 10 + new_min_iops = new_min_iops + 100 + new_max_iops = new_max_iops + 200 + + disk_offering = { + "name": "SF_DO_A", + "displaytext": "SF_DO_A (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + try: + # Try to change the size and IOPS of a volume attached to a stopped VM (should fail). 
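+ # (Even with the VM stopped, a root volume cannot take a new disk offering; the except clause below expects TestVolumes._only_data_volumes_err_msg.)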
+ volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._only_data_volumes_err_msg not in str(e): + raise + + disk_offering.delete(self.apiClient) + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_B", + "displaytext": "SF_DO_B (Min IOPS = " + str(volume.miniops) + "; Max IOPS = " + str(volume.maxiops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": volume.miniops, + "maxiops": volume.maxiops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + try: + # Try to change the size of a volume attached to a stopped VM (should fail). + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._only_data_volumes_err_msg not in str(e): + raise + + disk_offering.delete(self.apiClient) + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + disk_offering = { + "name": "SF_DO_C", + "displaytext": "SF_DO_C (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": volume_size_in_GB, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + disk_offering.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + def _handle_data_volume_with_started_vm(self, volume): + self._verify_volume(volume) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + new_size = volume_size_in_GB + 10 + new_min_iops = volume.miniops + 100 + new_max_iops = volume.maxiops + 200 + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + try: + # Try to change the size and IOPS of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, size=new_size, miniops=new_min_iops, maxiops=new_max_iops) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._to_change_volume_size_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + try: + # Try to change the size of a volume attached to a running VM (should fail). 
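+ # (This data volume's current disk offering is not customizable, so a direct size change is rejected; the except clause below expects TestVolumes._to_change_volume_size_err_msg.)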
+ volume.resize(self.apiClient, size=new_size) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._to_change_volume_size_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_A", + "displaytext": "SF_DO_A (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + try: + # Try to change the size and IOPS of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._this_kind_of_disk_err_msg not in str(e): + raise + + disk_offering.delete(self.apiClient) + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_B", + "displaytext": "SF_DO_B (Min IOPS = " + str(volume.miniops) + "; Max IOPS = " + str(volume.maxiops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": volume.miniops, + "maxiops": volume.maxiops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + try: + # Try to change the size of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._this_kind_of_disk_err_msg not in str(e): + raise + + disk_offering.delete(self.apiClient) + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + try: + # Try to change the IOPS of a volume attached to a running VM (should fail). 
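+ # (The current disk offering does not support customized IOPS, so the except clause below expects TestVolumes._min_iops_err_msg.)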
+ volume.resize(self.apiClient, miniops=new_min_iops, maxiops=new_max_iops) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._min_iops_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_C", + "displaytext": "SF_DO_C (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": volume_size_in_GB, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + disk_offering.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + def _handle_data_volume_with_stopped_vm(self, volume): + self._verify_volume(volume) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + new_size = volume_size_in_GB + 10 + new_min_iops = volume.miniops + 100 + new_max_iops = volume.maxiops + 200 + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + try: + # Try to change the size and IOPS of a volume attached to a stopped VM (should fail). + volume.resize(self.apiClient, size=new_size, miniops=new_min_iops, maxiops=new_max_iops) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._to_change_volume_size_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + try: + # Try to change the size of a volume attached to a stopped VM (should fail). + volume.resize(self.apiClient, size=new_size) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._to_change_volume_size_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + try: + # Try to change the IOPS of a volume attached to a stopped VM (should fail). 
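+ # (As in the running-VM case, the offering does not support customized IOPS; the except clause below expects TestVolumes._min_iops_err_msg.)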
+ volume.resize(self.apiClient, miniops=new_min_iops, maxiops=new_max_iops) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._min_iops_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + disk_offering = { + "name": "SF_DO_A", + "displaytext": "SF_DO_A (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + disk_offering.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + new_size = new_size + 10 + + disk_offering = { + "name": "SF_DO_B", + "displaytext": "SF_DO_B (Min IOPS = " + str(volume.miniops) + "; Max IOPS = " + str(volume.maxiops) + ")", + "disksize": new_size, + "customizediops": False, + "miniops": volume.miniops, + "maxiops": volume.maxiops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + disk_offering.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + new_min_iops = new_min_iops + 100 + new_max_iops = new_max_iops + 200 + + disk_offering = { + "name": "SF_DO_C", + "displaytext": "SF_DO_C (Min IOPS = " + str(new_min_iops) + "; Max IOPS = " + str(new_max_iops) + ")", + "disksize": volume_size_in_GB, + "customizediops": False, + "miniops": new_min_iops, + "maxiops": new_max_iops, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + + disk_offering = DiskOffering.create( + self.apiClient, + disk_offering + ) + + volume.resize(self.apiClient, diskofferingid=disk_offering.id) + + disk_offering.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + def _handle_custom_data_volume_with_started_vm(self, volume): + self._verify_volume(volume) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + 
type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + new_size = volume_size_in_GB + 10 + new_min_iops = volume.miniops + 100 + new_max_iops = volume.maxiops + 200 + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + try: + # Try to change the size and IOPS of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, size=new_size, miniops=new_min_iops, maxiops=new_max_iops) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._this_kind_of_disk_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + try: + # Try to change the size of a volume attached to a running VM (should fail). + volume.resize(self.apiClient, size=new_size) + + raise Exception(TestVolumes._volume_resize_did_not_fail_err_msg) + except Exception as e: + if TestVolumes._this_kind_of_disk_err_msg not in str(e): + raise + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + volume.resize(self.apiClient, miniops=new_min_iops, maxiops=new_max_iops) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + def _handle_custom_data_volume_with_stopped_vm(self, volume): + self._verify_volume(volume) + + volume_size_in_GB = volume.size / TestData.one_GB_in_bytes + + self.assertTrue( + type(volume_size_in_GB) == int, + TestVolumes._volume_size_not_an_int + ) + + new_size = volume_size_in_GB + 10 + new_min_iops = volume.miniops + 100 + new_max_iops = volume.maxiops + 200 + + volume.resize(self.apiClient, size=new_size, miniops=new_min_iops, maxiops=new_max_iops) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + new_size = new_size + 10 + + volume.resize(self.apiClient, size=new_size) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + sf_volume_size = self._get_sf_volume(volume.name).total_size + + self._verify_volume(volume, volume.miniops, volume.maxiops, sf_volume_size) + + new_min_iops = volume.miniops + 10 + new_max_iops = volume.maxiops + 20 + + volume.resize(self.apiClient, miniops=new_min_iops, maxiops=new_max_iops) + + list_volumes_response = list_volumes( + self.apiClient, + id=volume.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + volume = Volume(list_volumes_response[0].__dict__) + + self._verify_volume(volume, new_min_iops, new_max_iops, sf_volume_size) + + def _get_sf_volume(self, volume_name): + sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, "The SolidFire account ID should be a non-zero integer.") + + sf_volumes = 
sf_util.get_active_sf_volumes(self.sfe, sf_account_id) + + self.assertNotEqual( + len(sf_volumes), + 0, + "The length of the response for the SolidFire-volume query should not be zero." + ) + + return sf_util.check_and_get_sf_volume(sf_volumes, volume_name, self) + + def _verify_volume(self, cs_volume, expected_min_iops=None, expected_max_iops=None, expected_size=None): + sf_volume = self._get_sf_volume(cs_volume.name) + + sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, cs_volume, self) + + sf_util.check_size_and_iops(sf_volume, cs_volume, sf_volume_size, self) + + if expected_min_iops is not None: + self.assertEqual( + cs_volume.miniops, + expected_min_iops, + "Unexpected Min IOPS value (CloudStack volume has " + str(cs_volume.miniops) + "; expected " + str(expected_min_iops) + ")" + ) + + if expected_max_iops is not None: + self.assertEqual( + cs_volume.maxiops, + expected_max_iops, + "Unexpected Max IOPS value (CloudStack volume has " + str(cs_volume.maxiops) + "; expected " + str(expected_max_iops) + ")" + ) + + if expected_size is not None: + self.assertEqual( + sf_volume_size, + expected_size, + "Unexpected size value (CloudStack volume (with HSR) has " + str(sf_volume_size) + "; expected " + str(expected_size) + ")" + ) + + def _create_vm_using_template_and_destroy_vm(self, template): + vm_name = "VM-%d" % random.randint(0, 100) + + virtual_machine_dict = {"name": vm_name, "displayname": vm_name} + + virtual_machine = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + vm_root_volume = list_volumes_response[0] + vm_root_volume_name = vm_root_volume.name + + sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg) + + # Get volume information from SolidFire cluster + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) + + sf_util.get_sf_volume_by_name(self, sf_volumes, vm_root_volume_name) + + virtual_machine.delete(self.apiClient, True) + + def _verify_hsr(self, cs_volume_size_in_gb, hsr, sf_volume_size_in_bytes): + cs_volume_size_including_hsr_in_bytes = self._get_cs_volume_size_including_hsr_in_bytes(cs_volume_size_in_gb, hsr) + + self.assertTrue( + cs_volume_size_including_hsr_in_bytes == sf_volume_size_in_bytes, + "HSR does not add up correctly." 
+ ) + + def _get_cs_volume_size_including_hsr_in_bytes(self, cs_volume_size_in_gb, hsr): + if TestData.hypervisor_type == TestData.kvm: + return self._get_bytes_from_gb(cs_volume_size_in_gb) + + lowest_hsr = 10 + + if hsr < lowest_hsr: + hsr = lowest_hsr + + return self._get_bytes_from_gb(cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100))) + + def _get_bytes_from_gb(self, number_in_gb): + return number_in_gb * 1024 * 1024 * 1024 + + def _get_vm(self, vm_id): + list_vms_response = list_virtual_machines(self.apiClient, id=vm_id) + + sf_util.check_list(list_vms_response, 1, self, TestVolumes._should_only_be_one_vm_in_list_err_msg) + + return list_vms_response[0] + + def _check_xen_sr(self, xen_sr_name, should_exist=True): + sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist) + + def _get_active_sf_volumes(self, sf_account_id=None): + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) + + self.assertNotEqual( + len(sf_volumes), + 0, + TestVolumes._volume_response_should_not_be_zero_err_msg + ) + + return sf_volumes + + def _get_template_cache_name(self): + if TestData.hypervisor_type == TestData.kvm: + return TestData.templateCacheNameKvm + elif TestData.hypervisor_type == TestData.xenServer: + return TestData.templateCacheNameXenServer + + self.assert_(False, "Invalid hypervisor type") + + def _get_modified_iscsi_name(self, sf_iscsi_name): + sf_iscsi_name = sf_iscsi_name.replace("/", "") + + return sf_iscsi_name[:-1] + + def _check_host_side(self, sf_iscsi_name, vm_hostid=None, should_exist=True): + if TestData.hypervisor_type == TestData.kvm: + self._check_kvm_host_side(self._get_modified_iscsi_name(sf_iscsi_name), vm_hostid, should_exist) + elif TestData.hypervisor_type == TestData.xenServer: + self._check_xen_sr(sf_iscsi_name, should_exist) + + def _check_kvm_host_side(self, sf_iscsi_name, vm_hostid, should_exist=True): + if vm_hostid is None: + list_hosts_response = list_hosts( + self.apiClient, + type="Routing" + ) + else: + list_hosts_response = list_hosts( + self.apiClient, + id=vm_hostid + ) + + sf_util.check_list(list_hosts_response, 1, self, TestVolumes._should_only_be_one_host_in_list_err_msg) + + kvm_login = self.testdata[TestData.kvm] + + for cs_host in list_hosts_response: + ssh_connection = sf_util.get_ssh_connection(cs_host.ipaddress, kvm_login[TestData.username], kvm_login[TestData.password]) + + stdout = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + sf_iscsi_name)[1] + + result = stdout.read() + + ssh_connection.close() + + if should_exist: + self.assertFalse(result is None, "Unable to locate 'by-path' field on the KVM host (None)") + self.assertFalse(len(result.strip()) <= len(sf_iscsi_name), "Unable to locate the 'by-path' field on the KVM host (Zero-length string)") + else: + self.assertTrue(result is None or len(result.strip()) == 0, "Found the 'by-path' field on the KVM host, but did not expect to") + + @classmethod + def _start_vm(cls, vm): + vm_for_check = list_virtual_machines( + cls.apiClient, + id=vm.id + )[0] + + if vm_for_check.state == VirtualMachine.STOPPED: + vm.start(cls.apiClient) + + # Libvirt appears to have an issue detaching a volume from a VM while the VM is booting up. + # The XML sent to update the VM seems correct, but it doesn't appear to update the XML that describes the VM. + # For KVM, just give it 90 seconds to boot up. 
+ if TestData.hypervisor_type == TestData.kvm: + time.sleep(90) @classmethod def _reboot_vm(cls, vm): diff --git a/test/integration/plugins/solidfire/util/sf_util.py b/test/integration/plugins/solidfire/util/sf_util.py index de848d953c9..e7adf113bd4 100644 --- a/test/integration/plugins/solidfire/util/sf_util.py +++ b/test/integration/plugins/solidfire/util/sf_util.py @@ -17,6 +17,8 @@ import paramiko +from marvin.lib.common import list_volumes + def check_list(in_list, expected_size_of_list, obj_assert, err_msg): obj_assert.assertEqual( isinstance(in_list, list), @@ -247,3 +249,83 @@ def get_ssh_connection(ip_address, username, password): return ssh_client +def get_sf_volume_by_name(obj_assert, sf_volumes, sf_volume_name): + check_list_not_empty(obj_assert, sf_volumes) + + sf_volume = None + + for volume in sf_volumes: + if volume.name == sf_volume_name: + sf_volume = volume + + break + + obj_assert.assertNotEqual( + sf_volume, + None, + "The SolidFire volume could not be found in the expected list." + ) + + return sf_volume + +def check_list_not_empty(obj_assert, in_list): + obj_assert.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + obj_assert.assertGreater( + len(in_list), + 0, + "The size of 'in_list' must be greater than zero." + ) + +def check_and_get_cs_volume(obj_test, volume_id, volume_name, obj_assert): + list_volumes_response = list_volumes( + obj_test.apiClient, + id=volume_id + ) + + check_list(list_volumes_response, 1, obj_assert, "There should only be one volume in this list.") + + cs_volume = list_volumes_response[0] + + check_volume(obj_test, cs_volume, volume_name, obj_assert) + + return cs_volume + +def check_volume(obj_test, cs_volume, volume_name, obj_assert): + obj_assert.assertTrue( + cs_volume.name.startswith(volume_name), + "The volume name is incorrect." + ) + + obj_assert.assertEqual( + cs_volume.zoneid, + obj_test.zone.id, + "The zone is incorrect." + ) + + obj_assert.assertEqual( + cs_volume.diskofferingid, + obj_test.disk_offering.id, + "The disk offering is incorrect." + ) + + obj_assert.assertEqual( + cs_volume.storagetype, + obj_test.disk_offering.storagetype, + "The storage type is incorrect." 
+ ) + +def get_cs_volume_db_id(dbConnection, vol): + return get_db_id(dbConnection, "volumes", vol) + +def get_db_id(dbConnection, table, db_obj): + sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = dbConnection.execute(sql_query) + + return sql_result[0][0] diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 668dd33cc00..a025efefcbc 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -189,7 +189,8 @@ 'removeAnnotation' : 'Annotations', 'CA': 'Certificate', 'listElastistorInterface': 'Misc', - 'cloudian': 'Cloudian' + 'cloudian': 'Cloudian', + 'Sioc' : 'Sioc' } diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py index b84d3f1e3ec..23621991cc2 100755 --- a/tools/marvin/marvin/lib/base.py +++ b/tools/marvin/marvin/lib/base.py @@ -981,7 +981,15 @@ def create_custom_disk(cls, apiclient, services, account=None, elif "customdiskofferingid" in services: cmd.diskofferingid = services["customdiskofferingid"] - cmd.size = services["customdisksize"] + if "customdisksize" in services: + cmd.size = services["customdisksize"] + + if "customminiops" in services: + cmd.miniops = services["customminiops"] + + if "custommaxiops" in services: + cmd.maxiops = services["custommaxiops"] + cmd.zoneid = services["zoneid"] if account: @@ -1020,6 +1028,12 @@ def create_from_snapshot(cls, apiclient, snapshot_id, services, cmd.domainid = services["domainid"] return Volume(apiclient.createVolume(cmd).__dict__) + @classmethod + def revertToSnapshot(cls, apiclient, volumeSnapshotId): + cmd = revertSnapshot.revertSnapshotCmd() + cmd.id = volumeSnapshotId + return apiclient.revertSnapshot(cmd) + def delete(self, apiclient): """Delete Volume""" cmd = deleteVolume.deleteVolumeCmd() @@ -1330,12 +1344,34 @@ def extract(cls, apiclient, id, mode, zoneid=None): return apiclient.extractTemplate(cmd) + @classmethod + def create_from_volume(cls, apiclient, volume, services, + random_name=True): + """Create Template from volume""" + # Create template from Volume ID + cmd = createTemplate.createTemplateCmd() + + Template._set_command(apiclient, cmd, services, random_name) + + cmd.volumeid = volume.id + + return Template(apiclient.createTemplate(cmd).__dict__) + @classmethod def create_from_snapshot(cls, apiclient, snapshot, services, random_name=True): """Create Template from snapshot""" - # Create template from Virtual machine and Snapshot ID + # Create template from Snapshot ID cmd = createTemplate.createTemplateCmd() + + Template._set_command(apiclient, cmd, services, random_name) + + cmd.snapshotid = snapshot.id + + return Template(apiclient.createTemplate(cmd).__dict__) + + @classmethod + def _set_command(cls, apiclient, cmd, services, random_name=True): cmd.displaytext = services["displaytext"] cmd.name = "-".join([ services["name"], @@ -1362,9 +1398,6 @@ def create_from_snapshot(cls, apiclient, snapshot, services, raise Exception( "Unable to find Ostype is required for creating template") - cmd.snapshotid = snapshot.id - return Template(apiclient.createTemplate(cmd).__dict__) - def delete(self, apiclient, zoneid=None): """Delete Template""" @@ -2188,12 +2221,15 @@ def create(cls, apiclient, services, tags=None, custom=False, domainid=None): if "customizediops" in services: cmd.customizediops = services["customizediops"] + else: + cmd.customizediops = False - if "maxiops" in services: - 
cmd.maxiops = services["maxiops"] + if not cmd.customizediops: + if "miniops" in services: + cmd.miniops = services["miniops"] - if "miniops" in services: - cmd.miniops = services["miniops"] + if "maxiops" in services: + cmd.maxiops = services["maxiops"] if "hypervisorsnapshotreserve" in services: cmd.hypervisorsnapshotreserve = services["hypervisorsnapshotreserve"] diff --git a/ui/scripts/system.js b/ui/scripts/system.js index ad3b7cd0690..07520df1d46 100755 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -18016,6 +18016,10 @@ id: "gluster", description: "Gluster" }); + items.push({ + id: "custom", + description: "custom" + }); args.response.success({ data: items }); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java index 3659cf5dc17..817320b5a21 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java @@ -57,7 +57,7 @@ public DatastoreMO(VmwareContext context, String morType, String morValue) { @Override public String getName() throws Exception { if (_name == null) - _name = (String)_context.getVimClient().getDynamicProperty(_mor, "name"); + _name = _context.getVimClient().getDynamicProperty(_mor, "name"); return _name; } @@ -109,7 +109,7 @@ public String getInventoryPath() throws Exception { PropertyFilterSpec pfSpec = new PropertyFilterSpec(); pfSpec.getPropSet().add(pSpec); pfSpec.getObjectSet().add(oSpec); - List pfSpecArr = new ArrayList(); + List pfSpecArr = new ArrayList<>(); pfSpecArr.add(pfSpec); List ocs = _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr); @@ -118,10 +118,14 @@ public String getInventoryPath() throws Exception { assert (ocs.get(0).getObj() != null); assert (ocs.get(0).getPropSet() != null); String dcName = ocs.get(0).getPropSet().get(0).getVal().toString(); - _ownerDc = new Pair(new DatacenterMO(_context, ocs.get(0).getObj()), dcName); + _ownerDc = new Pair<>(new DatacenterMO(_context, ocs.get(0).getObj()), dcName); return _ownerDc; } + public void renameDatastore(String newDatastoreName) throws Exception { + _context.getService().renameDatastore(_mor, newDatastoreName); + } + public void makeDirectory(String path, ManagedObjectReference morDc) throws Exception { String datastoreName = getName(); ManagedObjectReference morFileManager = _context.getServiceContent().getFileManager(); @@ -133,7 +137,7 @@ public void makeDirectory(String path, ManagedObjectReference morDc) throws Exce _context.getService().makeDirectory(morFileManager, fullPath, morDc, true); } - public String getDatastoreRootPath() throws Exception { + String getDatastoreRootPath() throws Exception { return String.format("[%s]", getName()); } @@ -210,7 +214,7 @@ public boolean deleteFile(String path, ManagedObjectReference morDc, boolean tes return false; } - public boolean copyDatastoreFile(String srcFilePath, ManagedObjectReference morSrcDc, ManagedObjectReference morDestDs, String destFilePath, + boolean copyDatastoreFile(String srcFilePath, ManagedObjectReference morSrcDc, ManagedObjectReference morDestDs, String destFilePath, ManagedObjectReference morDestDc, boolean forceOverwrite) throws Exception { String srcDsName = getName(); @@ -269,7 +273,7 @@ public boolean moveDatastoreFile(String srcFilePath, ManagedObjectReference morS public String[] getVmdkFileChain(String rootVmdkDatastoreFullPath) throws Exception { Pair dcPair = getOwnerDatacenter(); - List files = new 
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java index 731cfeaeaa6..f38f610e145 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java @@ -25,7 +25,10 @@ import com.vmware.vim25.DatastoreInfo; import com.vmware.vim25.DynamicProperty; import com.vmware.vim25.HostNasVolumeSpec; +import com.vmware.vim25.HostResignatureRescanResult; import com.vmware.vim25.HostScsiDisk; +import com.vmware.vim25.HostUnresolvedVmfsResignatureSpec; +import com.vmware.vim25.HostUnresolvedVmfsVolume; import com.vmware.vim25.ManagedObjectReference; import com.vmware.vim25.NasDatastoreInfo; import com.vmware.vim25.ObjectContent; @@ -34,6 +37,7 @@ import com.vmware.vim25.PropertySpec; import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VmfsDatastoreCreateSpec; +import com.vmware.vim25.VmfsDatastoreExpandSpec; import com.vmware.vim25.VmfsDatastoreOption; import com.cloud.hypervisor.vmware.util.VmwareContext; @@ -75,6 +79,18 @@ public ManagedObjectReference findDatastore(String name) throws Exception { return null; } + public List<HostUnresolvedVmfsVolume> queryUnresolvedVmfsVolumes() throws Exception { + return _context.getService().queryUnresolvedVmfsVolumes(_mor); + } + + public List<VmfsDatastoreOption> queryVmfsDatastoreExpandOptions(DatastoreMO datastoreMO) throws Exception { + return _context.getService().queryVmfsDatastoreExpandOptions(_mor, datastoreMO.getMor()); + } + + public void expandVmfsDatastore(DatastoreMO datastoreMO, VmfsDatastoreExpandSpec vmfsDatastoreExpandSpec) throws Exception { + _context.getService().expandVmfsDatastore(_mor, datastoreMO.getMor(), vmfsDatastoreExpandSpec); + } + // storeUrl in nfs://host/exportpath format public ManagedObjectReference findDatastoreByUrl(String storeUrl) throws Exception { assert (storeUrl != null); @@ -195,6 +211,22 @@ public NasDatastoreInfo getNasDatastoreInfo(ManagedObjectReference morDatastore) return null; } + public HostResignatureRescanResult resignatureUnresolvedVmfsVolume(HostUnresolvedVmfsResignatureSpec resolutionSpec) throws Exception { + ManagedObjectReference task = _context.getService().resignatureUnresolvedVmfsVolumeTask(_mor, resolutionSpec); + + boolean result = _context.getVimClient().waitForTask(task); + + if (result) { + _context.waitForTaskProgressDone(task); + + TaskMO taskMO = new TaskMO(_context, task); + + return (HostResignatureRescanResult)taskMO.getTaskInfo().getResult(); + } else { + throw new Exception("Unable to resignature the unresolved VMFS volume due to " + TaskMO.getTaskFailureInfo(_context, task)); + } + } + public List<ObjectContent> getDatastorePropertiesOnHostDatastoreSystem(String[] propertyPaths) throws Exception { PropertySpec pSpec = new PropertySpec();
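The queryUnresolvedVmfsVolumes()/resignatureUnresolvedVmfsVolume() pair above is what lets the host import a copied VMFS volume (for example, a SAN snapshot of a datastore LUN). A sketch of chaining the two helpers, assuming the standard vim25 accessors getExtent() and getDevicePath() on the unresolved-volume objects:

    import java.util.List;

    import com.cloud.hypervisor.vmware.mo.HostDatastoreSystemMO;
    import com.vmware.vim25.HostResignatureRescanResult;
    import com.vmware.vim25.HostUnresolvedVmfsExtent;
    import com.vmware.vim25.HostUnresolvedVmfsResignatureSpec;
    import com.vmware.vim25.HostUnresolvedVmfsVolume;

    public class ResignatureExample {
        // Sketch only: resignature the first unresolved VMFS volume the host reports.
        public static HostResignatureRescanResult resignatureFirst(HostDatastoreSystemMO hostDatastoreSystemMo) throws Exception {
            List<HostUnresolvedVmfsVolume> unresolved = hostDatastoreSystemMo.queryUnresolvedVmfsVolumes();

            if (unresolved == null || unresolved.isEmpty()) {
                throw new Exception("No unresolved VMFS volumes found on the host");
            }

            HostUnresolvedVmfsResignatureSpec spec = new HostUnresolvedVmfsResignatureSpec();

            // The resignature spec takes the device paths of the volume's extents.
            for (HostUnresolvedVmfsExtent extent : unresolved.get(0).getExtent()) {
                spec.getExtentDevicePath().add(extent.getDevicePath());
            }

            return hostDatastoreSystemMo.resignatureUnresolvedVmfsVolume(spec);
        }
    }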
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java index 6353e3501f5..4e14822b259 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java @@ -52,4 +52,12 @@ public void rescanHba(String iScsiHbaDevice) throws Exception { public void rescanVmfs() throws Exception { _context.getService().rescanVmfs(_mor); } + + public void mountVmfsVolume(String datastoreUuid) throws Exception { + _context.getService().mountVmfsVolume(_mor, datastoreUuid); + } + + public void unmountVmfsVolume(String datastoreUuid) throws Exception { + _context.getService().unmountVmfsVolume(_mor, datastoreUuid); + } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineDiskInfoBuilder.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineDiskInfoBuilder.java index 3b310fb586e..ee98e3af9f0 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineDiskInfoBuilder.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineDiskInfoBuilder.java @@ -89,8 +89,9 @@ public VirtualMachineDiskInfo getDiskInfoByBackingFileBaseName(String diskBackin private static boolean chainContains(List<String> chain, String diskBackingFileBaseName, String dataStoreName) { for (String backing : chain) { DatastoreFile file = new DatastoreFile(backing); + // Ensure the matching disk exists in the right datastore - if (file.getFileBaseName().equals(diskBackingFileBaseName) && backing.contains(dataStoreName)) + if (file.getFileBaseName().equals(diskBackingFileBaseName) && file.getDatastoreName().equals(dataStoreName)) return true; }
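The chainContains() change above replaces a substring test on the whole backing path with an exact comparison on the parsed datastore name; with contains(), a datastore whose name is a prefix of another (or happens to appear anywhere in the path) could produce a false match. A small illustration, assuming DatastoreFile's usual "[datastoreName] path" format and made-up names:

    // Illustration (hypothetical names) of the false positive fixed above:
    // the old substring check matches a disk on datastore "ds1" against the
    // datastore name "ds", while the exact comparison does not.
    import com.cloud.hypervisor.vmware.mo.DatastoreFile;

    public class ChainContainsExample {
        public static void main(String[] args) {
            String backing = "[ds1] i-2-15-VM/ROOT-15.vmdk";
            DatastoreFile file = new DatastoreFile(backing);

            boolean oldStyleMatch = backing.contains("ds");               // true: false positive
            boolean newStyleMatch = file.getDatastoreName().equals("ds"); // false: correct
            System.out.println(oldStyleMatch + " vs " + newStyleMatch);
        }
    }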
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 963813bae06..0078793df8d 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -468,19 +468,26 @@ public VirtualMachineSnapshotInfo getSnapshotInfo() throws Exception { } public boolean createSnapshot(String snapshotName, String snapshotDescription, boolean dumpMemory, boolean quiesce) throws Exception { + return createSnapshotGetReference(snapshotName, snapshotDescription, dumpMemory, quiesce) != null; + } + public ManagedObjectReference createSnapshotGetReference(String snapshotName, String snapshotDescription, boolean dumpMemory, boolean quiesce) throws Exception { long apiTimeout = _context.getVimClient().getVcenterSessionTimeout(); ManagedObjectReference morTask = _context.getService().createSnapshotTask(_mor, snapshotName, snapshotDescription, dumpMemory, quiesce); boolean result = _context.getVimClient().waitForTask(morTask); + if (result) { _context.waitForTaskProgressDone(morTask); ManagedObjectReference morSnapshot = null; + // We still need to wait until the object appears in vCenter long startTick = System.currentTimeMillis(); + while (System.currentTimeMillis() - startTick < apiTimeout) { morSnapshot = getSnapshotMor(snapshotName); + if (morSnapshot != null) { break; } @@ -493,16 +500,20 @@ public boolean createSnapshot(String snapshotName, String snapshotDescription, b } if (morSnapshot == null) { - s_logger.error("We've been waiting for over " + apiTimeout + " milli seconds for snapshot MOR to be appearing in vCenter after CreateSnapshot task is done, but it is still not there?!"); - return false; + s_logger.error("We've been waiting for over " + apiTimeout + " milliseconds for the snapshot MOR to appear in vCenter after the CreateSnapshot task completed, " + + "but it is still not there."); + + return null; } + s_logger.debug("Waited for " + (System.currentTimeMillis() - startTick) + " milliseconds for snapshot object [" + snapshotName + "] to appear in vCenter."); - return true; + + return morSnapshot; } else { s_logger.error("VMware createSnapshot_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - return false; + return null; } public boolean removeSnapshot(String snapshotName, boolean removeChildren) throws Exception { @@ -542,6 +553,21 @@ public boolean revertToSnapshot(String snapshotName) throws Exception { return false; } + /** + * Removes all of the snapshots of a VM and consolidates the disks. + */ + public void consolidateAllSnapshots() throws Exception { + ManagedObjectReference task = _context.getService().removeAllSnapshotsTask(_mor, true); + + boolean result = _context.getVimClient().waitForTask(task); + + if (result) { + _context.waitForTaskProgressDone(task); + } else { + throw new Exception("Unable to remove all snapshots due to the following issue: " + TaskMO.getTaskFailureInfo(_context, task)); + } + } + public boolean removeAllSnapshots() throws Exception { VirtualMachineSnapshotInfo snapshotInfo = getSnapshotInfo(); @@ -2339,12 +2365,16 @@ public void ensureScsiDeviceControllers(int count, int availableBusNum) throws E // return pair of VirtualDisk and disk device bus name (ide0:0, etc.) public Pair<VirtualDisk, String> getDiskDevice(String vmdkDatastorePath) throws Exception { + final String zeroLengthString = ""; + List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); - ArrayList<Pair<VirtualDisk, String>> partialMatchingDiskDevices = new ArrayList<Pair<VirtualDisk, String>>(); + ArrayList<Pair<VirtualDisk, String>> partialMatchingDiskDevices = new ArrayList<>(); DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath); + String srcBaseName = dsSrcFile.getFileBaseName(); String trimmedSrcBaseName = VmwareHelper.trimSnapshotDeltaPostfix(srcBaseName); + String srcDatastoreName = dsSrcFile.getDatastoreName() != null ? dsSrcFile.getDatastoreName() : zeroLengthString; s_logger.info("Look for disk device info for volume : " + vmdkDatastorePath + " with base name: " + srcBaseName); @@ -2353,22 +2383,38 @@ public void ensureScsiDeviceControllers(int count, int availableBusNum) throws E if (device instanceof VirtualDisk) { s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber()); - VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking(); + VirtualDeviceBackingInfo backingInfo = device.getBacking(); + if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; + do { s_logger.info("Test against disk backing : " + diskBackingInfo.getFileName()); DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName()); - String backingBaseName = dsBackingFile.getFileBaseName(); - if (backingBaseName.equalsIgnoreCase(srcBaseName)) { - String deviceNumbering = getDeviceBusName(devices, device); - s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); - return new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering); + + String backingDatastoreName = dsBackingFile.getDatastoreName() != null ? dsBackingFile.getDatastoreName() : zeroLengthString; + + if (srcDatastoreName.equals(zeroLengthString)) { + backingDatastoreName = zeroLengthString; } - if (backingBaseName.contains(trimmedSrcBaseName)) { - String deviceNumbering = getDeviceBusName(devices, device); - partialMatchingDiskDevices.add(new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering)); + + if (srcDatastoreName.equalsIgnoreCase(backingDatastoreName)) { + String backingBaseName = dsBackingFile.getFileBaseName(); + + if (backingBaseName.equalsIgnoreCase(srcBaseName)) { + String deviceNumbering = getDeviceBusName(devices, device); + + s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); + + return new Pair<>((VirtualDisk)device, deviceNumbering); + } + + if (backingBaseName.contains(trimmedSrcBaseName)) { + String deviceNumbering = getDeviceBusName(devices, device); + + partialMatchingDiskDevices.add(new Pair<>((VirtualDisk)device, deviceNumbering)); + } } diskBackingInfo = diskBackingInfo.getParent(); @@ -2380,19 +2426,24 @@ public void ensureScsiDeviceControllers(int count, int availableBusNum) throws E // No disk device was found with an exact match for the volume path, hence look for a disk device that matches the trimmed name. s_logger.info("No disk device with an exact match found for volume : " + vmdkDatastorePath + ". Look for disk device info against trimmed base name: " + srcBaseName); + if (partialMatchingDiskDevices != null) { if (partialMatchingDiskDevices.size() == 1) { VirtualDiskFlatVer2BackingInfo matchingDiskBackingInfo = (VirtualDiskFlatVer2BackingInfo)partialMatchingDiskDevices.get(0).first().getBacking(); + s_logger.info("Disk backing : " + matchingDiskBackingInfo.getFileName() + " matches ==> " + partialMatchingDiskDevices.get(0).second()); + return partialMatchingDiskDevices.get(0); } else if (partialMatchingDiskDevices.size() > 1) { s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as multiple disk devices were found to match" + " volume's trimmed base name: " + trimmedSrcBaseName); + return null; } } s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as no matching disk device found"); + return null; }
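The new createSnapshotGetReference() above keeps the old boolean createSnapshot() contract intact while letting callers act on the snapshot MOR directly (null signals failure instead of false). A hypothetical caller; the snapshot name and description are examples, and vmMo is assumed to wrap an existing VM:

    // Hypothetical caller of the createSnapshotGetReference() variant added above.
    import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
    import com.vmware.vim25.ManagedObjectReference;

    public class SnapshotExample {
        public static ManagedObjectReference takeQuiescedSnapshot(VirtualMachineMO vmMo) throws Exception {
            // No memory dump, quiesce the guest file system before snapshotting.
            ManagedObjectReference morSnapshot = vmMo.createSnapshotGetReference("cs-snap-1", "managed storage snapshot", false, true);

            if (morSnapshot == null) {
                throw new Exception("Failed to create snapshot cs-snap-1");
            }

            return morSnapshot;
        }
    }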
----------------------------------------------------------------
This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: users@infra.apache.org With regards, Apache Git Services