cloudstack-commits mailing list archives

From raj...@apache.org
Subject [1/2] git commit: updated refs/heads/master to 4721c53
Date Fri, 27 Jan 2017 00:06:13 GMT
Repository: cloudstack
Updated Branches:
  refs/heads/master a4061026b -> 4721c53ea


CLOUDSTACK-9619: Updates for SAN-assisted snapshots


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/06806a65
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/06806a65
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/06806a65

Branch: refs/heads/master
Commit: 06806a6523d4c5b46a0345ad62171ed26e843bb8
Parents: f82873e
Author: Mike Tutkowski <mike.tutkowski@solidfire.com>
Authored: Sun Nov 6 11:22:11 2016 -0700
Committer: Mike Tutkowski <mike.tutkowski@solidfire.com>
Committed: Tue Dec 6 17:32:56 2016 -0700

----------------------------------------------------------------------
 .../motion/StorageSystemDataMotionStrategy.java | 138 +++++--------------
 .../snapshot/StorageSystemSnapshotStrategy.java |  22 ++-
 .../storage/volume/VolumeServiceImpl.java       |  46 +++++--
 .../resource/Xenserver625StorageProcessor.java  |  21 ++-
 .../storage/datastore/util/SolidFireUtil.java   |   5 +-
 .../plugins/solidfire/TestManagedSystemVMs.py   |   8 +-
 .../solidfire/TestVMMigrationWithStorage.py     |   2 +-
 .../plugins/solidfire/TestVMSnapshots.py        |  68 ---------
 .../plugins/solidfire/util/sf_util.py           |   4 +-
 9 files changed, 110 insertions(+), 204 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
index b636524..2b72290 100644
--- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
@@ -19,7 +19,6 @@
 package org.apache.cloudstack.storage.motion;
 
 import com.cloud.agent.AgentManager;
-import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
 import com.cloud.agent.api.to.DiskTO;
@@ -37,9 +36,9 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.server.ManagementService;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.Snapshot;
 import com.cloud.storage.SnapshotVO;
 import com.cloud.storage.Storage.ImageFormat;
-import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeDetailVO;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.DiskOfferingDao;
@@ -79,15 +78,11 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.storage.command.CopyCmdAnswer;
 import org.apache.cloudstack.storage.command.CopyCommand;
-import org.apache.cloudstack.storage.command.DeleteCommand;
-import org.apache.cloudstack.storage.command.DettachAnswer;
-import org.apache.cloudstack.storage.command.DettachCommand;
 import org.apache.cloudstack.storage.command.ResignatureAnswer;
 import org.apache.cloudstack.storage.command.ResignatureCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
-import org.apache.cloudstack.storage.to.SnapshotObjectTO;
 import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
@@ -225,6 +220,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
 
                 if (canHandleDest) {
                     handleCreateVolumeFromSnapshotOnSecondaryStorage(snapshotInfo, volumeInfo, callback);
+
                     return;
                 }
 
@@ -415,8 +411,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
 
                 copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand);
 
-                if (needCache) {
+                if (!copyCmdAnswer.getResult()) {
+                    // We were not able to copy. Handle it.
+                    errMsg = copyCmdAnswer.getDetails();
 
+                    throw new CloudRuntimeException(errMsg);
+                }
+
+                if (needCache) {
                     // If cached storage was needed (in case of object store as secondary
                     // storage), at this point, the data has been copied from the primary
                     // to the NFS cache by the hypervisor. We now invoke another copy
@@ -430,34 +432,34 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
 
                     if (ep == null) {
                         errMsg = "No remote endpoint to send command, check if host or ssvm
is down?";
+
                         LOGGER.error(errMsg);
+
                         copyCmdAnswer = new CopyCmdAnswer(errMsg);
                     } else {
-                        copyCmdAnswer = (CopyCmdAnswer) ep.sendMessage(cmd);
+                        copyCmdAnswer = (CopyCmdAnswer)ep.sendMessage(cmd);
                     }
 
                     // clean up snapshot copied to staging
-                    performCleanupCacheStorage(destOnStore);
+                    cacheMgr.deleteCacheObject(destOnStore);
                 }
 
-
             } catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) {
                 String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : ";
 
                 LOGGER.warn(msg, ex);
 
                 throw new CloudRuntimeException(msg + ex.getMessage());
-
             } finally {
-
-                // detach and remove access after the volume is created
-                SnapshotObjectTO snapshotObjectTO = (SnapshotObjectTO) snapshotInfo.getTO();
-                DiskTO disk = new DiskTO(snapshotObjectTO, null, snapshotInfo.getPath(), Volume.Type.UNKNOWN);
-                detachManagedStoreVolume(snapshotInfo, hostVO, getProperty(snapshotInfo.getId(), DiskTO.IQN), srcDataStore.getId(), disk);
+                _volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore);
 
                 if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
                     if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
                         errMsg = copyCmdAnswer.getDetails();
+
+                        if (needCache) {
+                            cacheMgr.deleteCacheObject(destOnStore);
+                        }
                     }
                     else {
                         errMsg = "Unable to create template from snapshot";
@@ -497,11 +499,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
      * @param callback  for async
      */
     private void handleCreateVolumeFromSnapshotOnSecondaryStorage(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
-
         // at this point, the snapshotInfo and volumeInfo should have the same disk offering ID (so either one should be OK to get a DiskOfferingVO instance)
         DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volumeInfo.getDiskOfferingId());
         SnapshotVO snapshot = _snapshotDao.findById(snapshotInfo.getId());
-        DataStore destDataStore = volumeInfo.getDataStore();
 
         // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
         _volumeService.updateHypervisorSnapshotReserveForVolume(diskOffering, volumeInfo.getId(), snapshot.getHypervisorType());
@@ -510,9 +510,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
         String errMsg = null;
 
         HostVO hostVO = null;
-        try {
 
-            //create a volume on the storage
+        try {
+            // create a volume on the storage
             AsyncCallFuture<VolumeApiResult> future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore());
             VolumeApiResult result = future.get();
 
@@ -529,7 +529,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
 
             hostVO = getHost(snapshotInfo.getDataCenterId(), false);
 
-            //copy the volume from secondary via the hypervisor
+            // copy the volume from secondary via the hypervisor
             copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO);
 
             if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
@@ -543,12 +543,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
         }
         catch (Exception ex) {
             errMsg = ex.getMessage() != null ? ex.getMessage() : "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem'";
-        } finally {
-
-
-            DiskTO disk = new DiskTO(volumeInfo.getTO(), volumeInfo.getDeviceId(), volumeInfo.getPath(),volumeInfo.getVolumeType());
-            long storagePoolId = volumeInfo.getPoolId();
-            detachManagedStoreVolume(volumeInfo, hostVO, volumeInfo.get_iScsiName(), storagePoolId, disk);
         }
 
         CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
@@ -556,73 +550,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
         result.setResult(errMsg);
 
         callback.complete(result);
-
-
-    }
-
-    /**
-     * Detaches a managed volume from a host
-     *  @param dataObject Volume to be detached
-     * @param hostVO    Host where the volume is currently attached
-     * @param storagePoolId Storage where volume resides
-     * @param disk  Object which stores disk attributes to send to the agents
-     */
-    private void detachManagedStoreVolume(DataObject dataObject, HostVO hostVO, String iqn, long storagePoolId, DiskTO disk) {
-
-        DataStore destDataStore = dataObject.getDataStore();
-        DettachCommand cmd = new DettachCommand(disk, null);
-        StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
-
-        cmd.setManaged(true);
-        cmd.setStorageHost(storagePool.getHostAddress());
-        cmd.setStoragePort(storagePool.getPort());
-        cmd.set_iScsiName(iqn);
-
-        try {
-            DettachAnswer dettachAnswer = (DettachAnswer) _agentMgr.send(hostVO.getId(), cmd);
-
-            if (!dettachAnswer.getResult()) {
-                LOGGER.warn("Error detaching DataObject:" + dettachAnswer.getDetails());
-            }
-
-        } catch (Exception e) {
-            LOGGER.warn("Error detaching DataObject " + dataObject.getId() + " Error: " +
e.getMessage());
-        }
-
-
-        try {
-            _volumeService.revokeAccess(dataObject, hostVO, destDataStore);
-        } catch (Exception e) {
-            LOGGER.warn("Error revoking access to DataObject (DataObject ID = " + dataObject.getId()
+ "): " + e.getMessage(), e);
-        }
-    }
-
-    private void performCleanupCacheStorage(DataObject destOnStore) {
-        destOnStore.processEvent(Event.DestroyRequested);
-
-        DeleteCommand deleteCommand = new DeleteCommand(destOnStore.getTO());
-        EndPoint ep = selector.select(destOnStore);
-        try {
-            if (ep == null) {
-                LOGGER.warn("Unable to cleanup staging NFS because no endpoint was found
" +
-                "Object: " + destOnStore.getType() + " ID: " + destOnStore.getId());
-
-                destOnStore.processEvent(Event.OperationFailed);
-            } else {
-                Answer deleteAnswer = ep.sendMessage(deleteCommand);
-                if (deleteAnswer != null && deleteAnswer.getResult()) {
-                    LOGGER.warn("Unable to cleanup staging NFS " + deleteAnswer.getDetails()
+
-                    "Object: " + destOnStore.getType() + " ID: " + destOnStore.getId());
-                    destOnStore.processEvent(Event.OperationFailed);
-                }
-            }
-
-            destOnStore.processEvent(Event.OperationSuccessed);
-        } catch (Exception e) {
-            destOnStore.processEvent(Event.OperationFailed);
-            LOGGER.warn("Unable to clean up staging cache Exception " + e.getMessage() +
-                    "Object: " + destOnStore.getType() + " ID: " + destOnStore.getId());
-        }
     }
 
     /**
@@ -899,7 +826,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
     }
 
     private Map<String, String> getSnapshotDetails(SnapshotInfo snapshotInfo) {
-        Map<String, String> snapshotDetails = new HashMap<String, String>();
+        Map<String, String> snapshotDetails = new HashMap<>();
 
         long storagePoolId = snapshotInfo.getDataStore().getId();
         StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
@@ -1066,30 +993,34 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
      * @return result of the copy
      */
     private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo, HostVO hostVO) {
+        Snapshot.LocationType locationType = snapshotInfo.getLocationType();
+
         String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
         int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));
 
         DataObject srcData = snapshotInfo;
         CopyCmdAnswer copyCmdAnswer = null;
         DataObject cacheData = null;
+
         boolean needCacheStorage = needCacheStorage(snapshotInfo, volumeInfo);
 
         if (needCacheStorage) {
             cacheData = cacheSnapshotChain(snapshotInfo, new ZoneScope(volumeInfo.getDataCenterId()));
             srcData = cacheData;
-
         }
 
         CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
 
         try {
+            if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
+                _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
 
-            _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
-            _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
+                Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
 
-            Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
+                copyCommand.setOptions(srcDetails);
+            }
 
-            copyCommand.setOptions(srcDetails);
+            _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
 
             Map<String, String> destDetails = getVolumeDetails(volumeInfo);
 
@@ -1105,11 +1036,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
             throw new CloudRuntimeException(msg + ex.getMessage());
         }
         finally {
-            _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
+            if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
+                _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
+            }
+
             _volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
 
             if (needCacheStorage && copyCmdAnswer != null && copyCmdAnswer.getResult())
{
-                performCleanupCacheStorage(cacheData);
+                cacheMgr.deleteCacheObject(cacheData);
             }
         }
 

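Note on the hunks above: the strategy now grants and revokes host access to the source snapshot only when the snapshot's location type is PRIMARY (i.e. it still lives on the SAN), and staging-cache cleanup goes through cacheMgr.deleteCacheObject() instead of the removed hand-rolled DeleteCommand path. The following is a minimal method-level sketch of that grant/copy/revoke shape, not the production method: class scaffolding and imports are omitted, and performCopy() is a hypothetical stand-in for the CopyCommand dispatch shown in the diff.

    private CopyCmdAnswer copySnapshotToVolumeSketch(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, HostVO hostVO) {
        Snapshot.LocationType locationType = snapshotInfo.getLocationType();

        try {
            if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
                // only a SAN-resident (primary) snapshot needs host access granted on the source side
                _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
            }

            _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());

            return performCopy(snapshotInfo, volumeInfo, hostVO); // hypothetical helper for the CopyCommand send
        } finally {
            if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
                _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
            }

            _volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
        }
    }
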
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java
index 591e3a6..2742da6 100644
--- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java
@@ -90,18 +90,17 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
 
     @Override
     public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) {
-
         Preconditions.checkArgument(snapshotInfo != null, "backupSnapshot expects a valid snapshot");
 
         if (snapshotInfo.getLocationType() != Snapshot.LocationType.SECONDARY) {
-
             markAsBackedUp((SnapshotObject)snapshotInfo);
+
             return snapshotInfo;
         }
 
-        // At this point the snapshot is either taken as a native
+        // At this point, the snapshot is either taken as a native
         // snapshot on the storage or exists as a volume on the storage (clone).
-        // If archive flag is passed, we will copy this snapshot to secondary
+        // If archive flag is passed in, we should copy this snapshot to secondary
         // storage and delete it from the primary storage.
 
         HostVO host = getHost(snapshotInfo.getVolumeId());
@@ -109,15 +108,14 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
         boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(host.getClusterId());
 
         if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) {
-            String mesg = "Cannot archive snapshot: Either canStorageSystemCreateVolumeFromSnapshot or " +
-                    "computeClusterSupportsResign were false.  ";
+            String msg = "Cannot archive snapshot: canStorageSystemCreateVolumeFromSnapshot and/or computeClusterSupportsResign were false.";
+
+            s_logger.warn(msg);
 
-            s_logger.warn(mesg);
-            throw new CloudRuntimeException(mesg);
+            throw new CloudRuntimeException(msg);
         }
 
         return snapshotSvr.backupSnapshot(snapshotInfo);
-
     }
 
     @Override
@@ -255,14 +253,13 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
             backedUpSnapshot = backupSnapshot(snapshotOnPrimary);
 
             updateLocationTypeInDb(backedUpSnapshot);
-
         }
         finally {
             if (result != null && result.isSuccess()) {
                 volumeInfo.stateTransit(Volume.Event.OperationSucceeded);
 
                 if (snapshotOnPrimary != null && snapshotInfo.getLocationType() == Snapshot.LocationType.SECONDARY) {
-                    // cleanup the snapshot on primary
+                    // remove the snapshot on primary storage
                     try {
                         snapshotSvr.deleteSnapshot(snapshotOnPrimary);
                     } catch (Exception e) {
@@ -276,6 +273,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
         }
 
         snapshotDao.releaseFromLockTable(snapshotInfo.getId());
+
         return backedUpSnapshot;
     }
 
@@ -586,7 +584,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
 
         Snapshot.LocationType locationType = snapshot.getLocationType();
 
-        //If the snapshot exisits on Secondary Storage, we can't delete it.
+        // If the snapshot exists on Secondary Storage, we can't delete it.
         if (SnapshotOperation.DELETE.equals(op) && Snapshot.LocationType.SECONDARY.equals(locationType)) {
             return StrategyPriority.CANT_HANDLE;
         }

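For readability, here is a condensed method-level sketch of the backupSnapshot() flow the hunks above produce: a non-SECONDARY (SAN-resident) snapshot is simply marked backed up, while archiving to secondary storage requires both the storage-system clone capability and cluster resigning support. The two booleans are shown as already-computed locals; the real method derives them from the storage pool details and clusterDao, and class scaffolding is omitted.

    public SnapshotInfo backupSnapshotSketch(SnapshotInfo snapshotInfo, boolean canStorageSystemCreateVolumeFromSnapshot, boolean computeClusterSupportsResign) {
        if (snapshotInfo.getLocationType() != Snapshot.LocationType.SECONDARY) {
            // SAN-resident snapshot: nothing to copy, just mark it backed up
            markAsBackedUp((SnapshotObject)snapshotInfo);

            return snapshotInfo;
        }

        if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) {
            throw new CloudRuntimeException("Cannot archive snapshot: canStorageSystemCreateVolumeFromSnapshot and/or computeClusterSupportsResign were false.");
        }

        // copy to secondary storage via the snapshot service
        return snapshotSvr.backupSnapshot(snapshotInfo);
    }
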
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 05ec7d0..eef61a2 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -28,14 +28,6 @@ import java.util.Random;
 
 import javax.inject.Inject;
 
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.offering.DiskOffering;
-import com.cloud.org.Cluster;
-import com.cloud.org.Grouping.AllocationState;
-import com.cloud.resource.ResourceState;
-import com.cloud.server.ManagementService;
-import com.cloud.storage.RegisterVolumePayload;
-import com.cloud.utils.Pair;
 import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
 import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@@ -85,6 +77,7 @@ import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.alert.AlertManager;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.Resource.ResourceType;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.event.EventTypes;
 import com.cloud.event.UsageEventUtils;
 import com.cloud.exception.ConcurrentOperationException;
@@ -94,7 +87,13 @@ import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.host.dao.HostDetailsDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.offering.DiskOffering;
+import com.cloud.org.Cluster;
+import com.cloud.org.Grouping.AllocationState;
+import com.cloud.resource.ResourceState;
+import com.cloud.server.ManagementService;
 import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.RegisterVolumePayload;
 import com.cloud.storage.ScopeType;
 import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StoragePool;
@@ -111,6 +110,7 @@ import com.cloud.storage.template.TemplateProp;
 import com.cloud.user.AccountManager;
 import com.cloud.user.ResourceLimitService;
 import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GlobalLock;
 import com.cloud.utils.exception.CloudRuntimeException;
@@ -317,9 +317,7 @@ public class VolumeServiceImpl implements VolumeService {
 
         VolumeVO vol = volDao.findById(volume.getId());
 
-        String volumePath = vol.getPath();
-        Long poolId = vol.getPoolId();
-        if (poolId == null || volumePath == null || volumePath.trim().isEmpty()) {
+        if (!volumeExistsOnPrimary(vol)) {
             // not created on primary store
             if (volumeStore == null) {
                 // also not created on secondary store
@@ -348,6 +346,32 @@ public class VolumeServiceImpl implements VolumeService {
         return future;
     }
 
+    private boolean volumeExistsOnPrimary(VolumeVO vol) {
+        Long poolId = vol.getPoolId();
+
+        if (poolId == null) {
+            return false;
+        }
+
+        PrimaryDataStore primaryStore = dataStoreMgr.getPrimaryDataStore(poolId);
+
+        if (primaryStore == null) {
+            return false;
+        }
+
+        if (primaryStore.isManaged()) {
+            return true;
+        }
+
+        String volumePath = vol.getPath();
+
+        if (volumePath == null || volumePath.trim().isEmpty()) {
+            return false;
+        }
+
+        return true;
+    }
+
     public Void deleteVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CommandResult> callback, DeleteVolumeContext<VolumeApiResult> context) {
         CommandResult result = callback.getResult();
         VolumeObject vo = context.getVolume();

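The new volumeExistsOnPrimary() helper changes the "was this volume ever created on primary storage" test: on managed (SAN) storage a backing volume is considered present even before a path is recorded. A standalone restatement of that rule follows, with plain parameters in place of the VolumeVO and dataStoreMgr lookups the production method performs (the real code also returns false if the primary data store lookup itself fails); class scaffolding is omitted.

    static boolean volumeExistsOnPrimarySketch(Long poolId, boolean poolIsManaged, String volumePath) {
        if (poolId == null) {
            return false;                  // never placed on a primary pool
        }

        if (poolIsManaged) {
            return true;                   // managed storage backs the volume even before a path is set
        }

        // non-managed storage: the recorded path is the only evidence the volume was created
        return volumePath != null && !volumePath.trim().isEmpty();
    }
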
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
index 5c93cbb..1e7b143 100644
--- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
@@ -532,6 +532,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
                     if (snapshotSr != null) {
                         hypervisorResource.removeSR(conn, snapshotSr);
                     }
+
+                    if (primaryStore.isManaged()) {
+                        hypervisorResource.removeSR(conn, primaryStorageSR);
+                    }
                 }
             } else {
                 final String primaryStorageSRUuid = primaryStorageSR.getUuid(conn);
@@ -553,9 +557,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
                     physicalSize = Long.parseLong(tmp[1]);
                     finalPath = folder + File.separator + snapshotBackupUuid + ".vhd";
                 }
+
+                final String volumeUuid = snapshotTO.getVolume().getPath();
+
+                destroySnapshotOnPrimaryStorageExceptThis(conn, volumeUuid, snapshotUuid);
             }
-            final String volumeUuid = snapshotTO.getVolume().getPath();
-            destroySnapshotOnPrimaryStorageExceptThis(conn, volumeUuid, snapshotUuid);
 
             final SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
             newSnapshot.setPath(finalPath);
@@ -708,9 +714,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
         }
         SR srcSr = null;
         VDI destVdi = null;
-        try {
-            SR primaryStorageSR;
 
+        SR primaryStorageSR = null;
+
+        try {
             if (pool.isManaged()) {
                 Map<String,String> destDetails = cmd.getOptions2();
 
@@ -771,6 +778,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
             final VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setPath(volumeUUID);
             newVol.setSize(vdir.virtualSize);
+
             return new CopyCmdAnswer(newVol);
         } catch (final Types.XenAPIException e) {
             details += " due to " + e.toString();
@@ -782,6 +790,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
             if (srcSr != null) {
                 hypervisorResource.removeSR(conn, srcSr);
             }
+
+            if (pool.isManaged()) {
+                hypervisorResource.removeSR(conn, primaryStorageSR);
+            }
+
             if (!result && destVdi != null) {
                 try {
                     destVdi.destroy(conn);

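The XenServer 6.25 changes hoist primaryStorageSR out of the try block so that, for managed storage, the SR introduced for the copy can be removed again in the finally block and the SAN LUN is not left attached to the host. A fragment-level sketch of that lifecycle follows; createSrForManagedVolume() and copyToPrimary() are hypothetical stand-ins for the SR setup via cmd.getOptions2() and the copy step in the diff, and the null guard is an added precaution for the sketch, not taken from the source.

    SR primaryStorageSR = null;
    boolean result = false;

    try {
        if (pool.isManaged()) {
            // introduce an SR for the managed volume before copying
            primaryStorageSR = createSrForManagedVolume(conn, cmd.getOptions2());
        }

        result = copyToPrimary(conn, srcSr, primaryStorageSR);
    } finally {
        if (srcSr != null) {
            hypervisorResource.removeSR(conn, srcSr);
        }

        if (pool.isManaged() && primaryStorageSR != null) {
            // forget the temporary SR so the managed volume is detached again
            hypervisorResource.removeSR(conn, primaryStorageSR);
        }
    }
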
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
index 2caca72..f5b67a7 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
@@ -403,9 +403,10 @@ public class SolidFireUtil {
                 SolidFireUtil.getIqnsFromHosts(hosts), new long[] { sfVolumeId });
         }
         catch (Exception ex) {
-            String iqnInVagAlready = "Exceeded maximum number of Volume Access Groups per initiator";
+            String iqnInVagAlready1 = "Exceeded maximum number of Volume Access Groups per initiator";
+            String iqnInVagAlready2 = "Exceeded maximum number of VolumeAccessGroups per Initiator";
 
-            if (!ex.getMessage().contains(iqnInVagAlready)) {
+            if (!ex.getMessage().contains(iqnInVagAlready1) && !ex.getMessage().contains(iqnInVagAlready2)) {
                 throw new CloudRuntimeException(ex.getMessage());
             }
 

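The SolidFireUtil change widens the error matching: judging from the two strings in the diff, the "initiator already in a volume access group" condition can apparently be reported with either phrasing, and both are tolerated before any other failure is rethrown. A standalone sketch of that check (a helper name I introduce for illustration, not part of the class):

    static boolean isIqnAlreadyInVagError(String message) {
        if (message == null) {
            return false;
        }

        // both phrasings observed for the same SolidFire condition
        return message.contains("Exceeded maximum number of Volume Access Groups per initiator")
            || message.contains("Exceeded maximum number of VolumeAccessGroups per Initiator");
    }
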
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/test/integration/plugins/solidfire/TestManagedSystemVMs.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestManagedSystemVMs.py b/test/integration/plugins/solidfire/TestManagedSystemVMs.py
index a3343c7..2bfbe4a 100644
--- a/test/integration/plugins/solidfire/TestManagedSystemVMs.py
+++ b/test/integration/plugins/solidfire/TestManagedSystemVMs.py
@@ -488,7 +488,9 @@ class TestManagedSystemVMs(cloudstackTestCase):
                 type="Routing"
             )
 
-            sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, self.testdata[TestData.kvm], self, False)
+            kvm_login = self.testdata[TestData.kvm]
+
+            sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, kvm_login[TestData.username], kvm_login[TestData.password], self, False)
         else:
             self.assertTrue(False, "Invalid hypervisor type")
 
@@ -544,7 +546,9 @@ class TestManagedSystemVMs(cloudstackTestCase):
                     type="Routing"
                 )
 
-                sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, self.testdata[TestData.kvm], self)
+                kvm_login = self.testdata[TestData.kvm]
+
+                sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, kvm_login[TestData.username], kvm_login[TestData.password], self)
             else:
                 self.assertTrue(False, "Invalid hypervisor type")
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
index adbb44b..d563e5e 100644
--- a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
+++ b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
@@ -39,7 +39,7 @@ from marvin.lib.utils import cleanup_resources
 # Prerequisites:
 #  Only one zone
 #  Only one pod
-#  Two clusters
+#  Two clusters (have system VMs (including the VR) running on local or NFS storage)
 
 
 class TestData():

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/test/integration/plugins/solidfire/TestVMSnapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py
index db25390..880e6fd 100644
--- a/test/integration/plugins/solidfire/TestVMSnapshots.py
+++ b/test/integration/plugins/solidfire/TestVMSnapshots.py
@@ -139,77 +139,9 @@ class TestData:
                 TestData.tags: TestData.storageTag,
                 "storagetype": "shared"
             },
-            "testdiskofferings": {
-                "customiopsdo": {
-                    "name": "SF_Custom_IOPS_DO",
-                    "displaytext": "Customized IOPS DO (Size = 128 GB; Min IOPS = 500; Max
IOPS = 1000)",
-                    "disksize": 128,
-                    "customizediops": True,
-                    "miniops": 500,
-                    "maxiops": 1000,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "customsizedo": {
-                    "name": "SF_Custom_Size_DO",
-                    "displaytext": "Customized IOPS DO (Min IOPS = 500; Max IOPS = 1000)",
-                    "disksize": 175,
-                    "customizediops": False,
-                    "miniops": 500,
-                    "maxiops": 1000,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "customsizeandiopsdo": {
-                    "name": "SF_Custom_Size_IOPS_DO",
-                    "displaytext": "Customized Size and IOPS DO",
-                    "disksize": 200,
-                    "customizediops": True,
-                    "miniops": 400,
-                    "maxiops": 800,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newiopsdo": {
-                    "name": "SF_New_IOPS_DO",
-                    "displaytext": "New IOPS (Size = 128 GB; Min IOPS = 350, Max IOPS = 700)",
-                    "disksize": 128,
-                    "miniops": 350,
-                    "maxiops": 700,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newsizedo": {
-                    "name": "SF_New_Size_DO",
-                    "displaytext": "New Size: 175",
-                    "disksize": 175,
-                    "miniops": 400,
-                    "maxiops": 800,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newsizeandiopsdo": {
-                    "name": "SF_New_Size_IOPS_DO",
-                    "displaytext": "New Size and IOPS",
-                    "disksize": 200,
-                    "miniops": 200,
-                    "maxiops": 400,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                }
-            },
             TestData.volume_1: {
                 "diskname": "testvolume",
             },
-            "volume2": {
-                "diskname": "testvolume2",
-            },
             TestData.zoneId: 1,
             TestData.clusterId: 1,
             TestData.domainId: 1,

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06806a65/test/integration/plugins/solidfire/util/sf_util.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/util/sf_util.py b/test/integration/plugins/solidfire/util/sf_util.py
index f35839f..de848d9 100644
--- a/test/integration/plugins/solidfire/util/sf_util.py
+++ b/test/integration/plugins/solidfire/util/sf_util.py
@@ -155,11 +155,11 @@ def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
     else:
         check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")
 
-def check_kvm_access_to_volume(iscsi_name, kvm_hosts, kvm_login, obj_assert, should_exist=True):
+def check_kvm_access_to_volume(iscsi_name, kvm_hosts, username, password, obj_assert, should_exist=True):
     count = 0
 
     for kvm_host in kvm_hosts:
-        ssh_connection = sf_util.get_ssh_connection(kvm_host.ipaddress, kvm_login[TestData.username], kvm_login[TestData.password])
+        ssh_connection = get_ssh_connection(kvm_host.ipaddress, username, password)
 
         stdin, stdout, stderr = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + iscsi_name)
 

