cloudstack-commits mailing list archives

From bhais...@apache.org
Subject [31/50] [abbrv] git commit: updated refs/heads/4.9-bountycastle-daan to b9ee34f
Date Wed, 28 Sep 2016 09:46:22 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestVMSnapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py
index 8fba8f8..14e8e71 100644
--- a/test/integration/plugins/solidfire/TestVMSnapshots.py
+++ b/test/integration/plugins/solidfire/TestVMSnapshots.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from util import sf_util
+
 # All tests inherit from cloudstackTestCase
 from marvin.cloudstackTestCase import cloudstackTestCase
 
@@ -36,8 +38,10 @@ from marvin.lib.utils import cleanup_resources
 
 from solidfire import solidfire_element_api as sf_api
 
-# on April 15, 2016: Ran 2 tests in 800.299s with three hosts
-# on May 2, 2016: Ran 2 tests in 789.729s with two hosts
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Only one cluster
 
 
 class TestData:
@@ -328,7 +332,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
             cls.primary_storage.delete(cls.apiClient)
 
-            cls._purge_solidfire_volumes()
+            sf_util.purge_solidfire_volumes(cls.sf_client)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
 
@@ -346,7 +350,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
 
-        self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
+        sf_util.check_list(root_volumes, 1, self, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
 
         root_volume = root_volumes[0]
 
@@ -355,7 +359,7 @@ class TestVMSnapshots(cloudstackTestCase):
         sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id)
         sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
 
-        self._check_iscsi_name(sf_iscsi_name)
+        sf_util.check_iscsi_name(sf_iscsi_name, self)
 
         root_volume_path_1 = self._get_path(volume_id)
 
@@ -388,7 +392,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
 
-        self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         vdis_after_create = self._get_vdis(xen_vdis)
 
@@ -411,7 +415,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
 
-        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
+        sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
 
         root_volume_path_3 = self._get_path(volume_id)
 
@@ -423,7 +427,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
 
-        self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         vdis_after_revert = self._get_vdis(xen_vdis)
 
@@ -470,7 +474,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
 
-        self._check_list(xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
+        sf_util.check_list(xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
 
         vdis_after_delete = self._get_vdis(xen_vdis, True)
 
@@ -505,7 +509,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
 
-        self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
+        sf_util.check_list(root_volumes, 1, self, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
 
         root_volume = root_volumes[0]
 
@@ -514,13 +518,13 @@ class TestVMSnapshots(cloudstackTestCase):
         sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id)
         sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
 
-        self._check_iscsi_name(sf_iscsi_root_volume_name)
+        sf_util.check_iscsi_name(sf_iscsi_root_volume_name, self)
 
         root_volume_path_1 = self._get_path(root_volume_id)
 
         data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true")
 
-        self._check_list(data_volumes, 1, "There should only be one data volume.")
+        sf_util.check_list(data_volumes, 1, self, "There should only be one data volume.")
 
         data_volume = data_volumes[0]
 
@@ -529,7 +533,7 @@ class TestVMSnapshots(cloudstackTestCase):
         sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id)
         sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
 
-        self._check_iscsi_name(sf_iscsi_data_volume_name)
+        sf_util.check_iscsi_name(sf_iscsi_data_volume_name, self)
 
         data_volume_path_1 = self._get_path(data_volume_id)
 
@@ -570,7 +574,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
 
-        self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(root_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis)
 
@@ -586,7 +590,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
 
-        self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(data_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis)
 
@@ -609,7 +613,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
 
-        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
+        sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
 
         root_volume_path_3 = self._get_path(root_volume_id)
 
@@ -621,7 +625,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
 
-        self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(root_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis)
 
@@ -653,7 +657,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
 
-        self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(data_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis)
 
@@ -700,7 +704,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
 
-        self._check_list(root_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
+        sf_util.check_list(root_volume_xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
 
         root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True)
 
@@ -720,7 +724,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
 
-        self._check_list(data_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
+        sf_util.check_list(data_volume_xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
 
         data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True)
 
@@ -745,7 +749,7 @@ class TestVMSnapshots(cloudstackTestCase):
         return path_result['apipathforvolume']['path']
 
     def _verify_vm_snapshot(self, list_vm_snapshots, vm_snapshot):
-        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
+        sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
 
         vm_snapshot_from_list = list_vm_snapshots[0]
 
@@ -767,26 +771,6 @@ class TestVMSnapshots(cloudstackTestCase):
             "The snapshot is not in the 'Ready' state."
         )
 
-    def _check_iscsi_name(self, sf_iscsi_name):
-        self.assertEqual(
-            sf_iscsi_name[0],
-            "/",
-            "The iSCSI name needs to start with a forward slash."
-        )
-
-    def _check_list(self, in_list, expected_size_of_list, err_msg):
-        self.assertEqual(
-            isinstance(in_list, list),
-            True,
-            "'in_list' is not a list."
-        )
-
-        self.assertEqual(
-            len(in_list),
-            expected_size_of_list,
-            err_msg
-        )
-
     def _get_vdis(self, xen_vdis, only_active_expected=False):
         expected_number_of_vdis = 1 if only_active_expected else 3
 
@@ -852,11 +836,3 @@ class TestVMSnapshots(cloudstackTestCase):
         vdis.base_vdi = base_vdi
 
         return vdis
-
-    @classmethod
-    def _purge_solidfire_volumes(cls):
-        deleted_volumes = cls.sf_client.list_deleted_volumes()
-
-        for deleted_volume in deleted_volumes:
-            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
-
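
The change above in TestVMSnapshots.py replaces the private _check_list, _check_iscsi_name, and _purge_solidfire_volumes helpers with shared functions from the new sf_util module, passing the test case in as the assertion object. A minimal, self-contained sketch of that pattern (not part of the commit; root_volumes is a stand-in for the Marvin list_volumes result):

import unittest


def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
    # Same shape as sf_util.check_list introduced by this commit: the test
    # case is handed in as obj_assert and supplies the unittest assertions.
    obj_assert.assertEqual(isinstance(in_list, list), True, "'in_list' is not a list.")
    obj_assert.assertEqual(len(in_list), expected_size_of_list, err_msg)


class ExampleTest(unittest.TestCase):
    def test_single_root_volume(self):
        # Stand-in for list_volumes(self.apiClient, type="ROOT", listAll="true").
        root_volumes = ["ROOT-1"]

        check_list(root_volumes, 1, self, "There should only be one root volume.")


if __name__ == "__main__":
    unittest.main()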

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestVolumes.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVolumes.py b/test/integration/plugins/solidfire/TestVolumes.py
index ed7d42a..63b9be1 100644
--- a/test/integration/plugins/solidfire/TestVolumes.py
+++ b/test/integration/plugins/solidfire/TestVolumes.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from util import sf_util
+
 # All tests inherit from cloudstackTestCase
 from marvin.cloudstackTestCase import cloudstackTestCase
 
@@ -39,11 +41,13 @@ from marvin.lib.utils import cleanup_resources
 
 from solidfire import solidfire_element_api as sf_api
 
-# on April 14, 2016: Ran 11 tests in 2494.043s with three hosts (resign = True)
-# on April 14, 2016: Ran 11 tests in 2033.516s with three hosts (resign = False)
-
-# on May 2, 2016: Ran 11 tests in 2352.461s with two hosts (resign = True)
-# on May 2, 2016: Ran 11 tests in 1982.066s with two hosts (resign = False)
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Only one cluster
+#
+# Running the tests:
+#  Change the "supports_resign" variable to True or False as desired.
 
 
 class TestData():
@@ -145,7 +149,7 @@ class TestData():
                 "miniops": "10000",
                 "maxiops": "15000",
                 "hypervisorsnapshotreserve": 200,
-                "tags": "SolidFire_SAN_1"
+                TestData.tags: TestData.storageTag
             },
             TestData.diskOffering: {
                 "name": "SF_DO_1",
@@ -158,71 +162,6 @@ class TestData():
                 TestData.tags: TestData.storageTag,
                 "storagetype": "shared"
             },
-            "testdiskofferings": {
-                "customiopsdo": {
-                    "name": "SF_Custom_Iops_DO",
-                    "displaytext": "Customized Iops DO",
-                    "disksize": 128,
-                    "customizediops": True,
-                    "miniops": 500,
-                    "maxiops": 1000,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "customsizedo": {
-                    "name": "SF_Custom_Size_DO",
-                    "displaytext": "Customized Size DO",
-                    "disksize": 175,
-                    "customizediops": False,
-                    "miniops": 500,
-                    "maxiops": 1000,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "customsizeandiopsdo": {
-                    "name": "SF_Custom_Iops_Size_DO",
-                    "displaytext": "Customized Size and Iops DO",
-                    "disksize": 200,
-                    "customizediops": True,
-                    "miniops": 400,
-                    "maxiops": 800,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newiopsdo": {
-                    "name": "SF_New_Iops_DO",
-                    "displaytext": "New Iops (min=350, max = 700)",
-                    "disksize": 128,
-                    "miniops": 350,
-                    "maxiops": 700,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newsizedo": {
-                    "name": "SF_New_Size_DO",
-                    "displaytext": "New Size: 175",
-                    "disksize": 175,
-                    "miniops": 400,
-                    "maxiops": 800,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newsizeandiopsdo": {
-                    "name": "SF_New_Size_Iops_DO",
-                    "displaytext": "New Size and Iops",
-                    "disksize": 200,
-                    "miniops": 200,
-                    "maxiops": 400,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                }
-            },
             TestData.volume_1: {
                 TestData.diskName: "test-volume",
             },
@@ -241,14 +180,11 @@ class TestVolumes(cloudstackTestCase):
     _should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list."
     _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
     _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
-    _vag_id_should_be_non_zero_int_err_msg = "The SolidFire VAG ID should be a non-zero integer."
     _volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer."
     _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
     _vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state."
     _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state."
-    _sr_not_shared_err_msg = "The SR is not shared."
     _volume_response_should_not_be_zero_err_msg = "The length of the response for the SolidFire-volume query should not be zero."
-    _list_should_be_empty = "The list should be empty."
     _volume_should_not_be_in_a_vag = "The volume should not be in a volume access group."
 
     @classmethod
@@ -262,7 +198,7 @@ class TestVolumes(cloudstackTestCase):
 
         cls.supports_resign = True
 
-        cls._set_supports_resign()
+        sf_util.set_supports_resign(cls.supports_resign, cls.dbConnection)
 
         # Set up xenAPI connection
         host_ip = "https://" + \
@@ -368,7 +304,7 @@ class TestVolumes(cloudstackTestCase):
 
             cls.primary_storage.delete(cls.apiClient)
 
-            cls._purge_solidfire_volumes()
+            sf_util.purge_solidfire_volumes(cls.sf_client)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
 
@@ -387,9 +323,9 @@ class TestVolumes(cloudstackTestCase):
         if self.supports_resign == False:
             return
 
-        sf_volumes = self._get_sf_volumes()
+        sf_volumes = self._get_active_sf_volumes()
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, TestData.templateCacheName)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -451,21 +387,23 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(new_volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_iscsi_name = self._get_iqn(new_volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, newvolume.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, newvolume.name, self)
 
-        self._check_size_and_iops(sf_volume, newvolume, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, newvolume, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -481,9 +419,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -516,17 +454,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -559,9 +499,9 @@ class TestVolumes(cloudstackTestCase):
             str(vm.state)
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -600,11 +540,11 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -614,9 +554,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -649,17 +589,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -674,17 +616,19 @@ class TestVolumes(cloudstackTestCase):
 
         vm = self._get_vm(self.virtual_machine.id)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -694,9 +638,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -729,17 +673,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -772,9 +718,9 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -796,9 +742,9 @@ class TestVolumes(cloudstackTestCase):
 
         vm = self._get_vm(self.virtual_machine.id)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -814,9 +760,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -849,17 +795,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -894,9 +842,9 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_stopped_state_err_msg
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -918,9 +866,9 @@ class TestVolumes(cloudstackTestCase):
 
         vm = self._get_vm(self.virtual_machine.id)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -936,9 +884,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.stop(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -971,17 +919,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_stopped_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1003,17 +953,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1061,21 +1013,23 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_vag_id = self._get_vag_id()
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1114,11 +1068,11 @@ class TestVolumes(cloudstackTestCase):
             "Check if VM was actually expunged"
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -1174,21 +1128,23 @@ class TestVolumes(cloudstackTestCase):
             str(vm.state)
         )
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(new_volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_iscsi_name = self._get_iqn(new_volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1219,11 +1175,11 @@ class TestVolumes(cloudstackTestCase):
             str(vm.state)
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -1246,9 +1202,9 @@ class TestVolumes(cloudstackTestCase):
             "Check volume was deleted"
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_and_get_sf_volume(sf_volumes, vol.name, False)
+        sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self, False)
 
     @attr(hypervisor='XenServer')
     def test_09_attach_volumes_multiple_accounts(self):
@@ -1342,39 +1298,43 @@ class TestVolumes(cloudstackTestCase):
             str(test_vm.state)
         )
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(vol)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, vol, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
+
+        sf_test_account_id = sf_util.get_sf_account_id(self.cs_api, test_account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_test_account_id = self._get_sf_account_id(self.primary_storage.id, test_account.id)
+        sf_test_volumes = self._get_active_sf_volumes(sf_test_account_id)
 
-        sf_test_volumes = self._get_sf_volumes(sf_test_account_id)
+        sf_test_volume = sf_util.check_and_get_sf_volume(sf_test_volumes, test_vol.name, self)
 
-        sf_test_volume = self._check_and_get_sf_volume(sf_test_volumes, test_vol.name)
+        sf_test_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, test_vol, self)
 
-        sf_test_volume_size = self._get_volume_size_with_hsr(test_vol)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_test_volume_size)
 
-        self._check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size)
+        sf_util.check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size, self)
 
-        sf_test_iscsi_name = self._get_iqn(test_volume)
+        sf_test_iscsi_name = sf_util.get_iqn(self.cs_api, test_volume, self)
 
         self._check_xen_sr(sf_test_iscsi_name)
 
-        self._check_vag(sf_test_volume, sf_vag_id)
+        sf_util.check_vag(sf_test_volume, sf_vag_id, self)
 
     @attr(hypervisor='XenServer')
     def test_10_attach_more_than_one_disk_to_VM(self):
@@ -1417,66 +1377,50 @@ class TestVolumes(cloudstackTestCase):
 
         vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName])
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
+
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_2_size = sf_util.get_volume_size_with_hsr(self.cs_api, volume_2, self)
 
-        sf_volume_2_size = self._get_volume_size_with_hsr(volume_2)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_2_size)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
-        sf_volume_2 = self._check_and_get_sf_volume(sf_volumes, vol_2.name)
+        sf_volume_2 = sf_util.check_and_get_sf_volume(sf_volumes, vol_2.name, self)
 
-        self._check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size)
+        sf_util.check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size, self)
 
-        sf_iscsi_name_2 = self._get_iqn(volume_2)
+        sf_iscsi_name_2 = sf_util.get_iqn(self.cs_api, volume_2, self)
 
         self._check_xen_sr(sf_iscsi_name_2)
 
-        self._check_vag(sf_volume_2, sf_vag_id)
+        sf_util.check_vag(sf_volume_2, sf_vag_id, self)
 
         self.virtual_machine.detach_volume(self.apiClient, volume_2)
 
     '''
     @attr(hypervisor = 'XenServer')
-    def _test_11_attach_disk_to_running_vm_change_iops(self):
+    def test_11_attach_disk_to_running_vm_change_iops(self):
         Attach a disk to a running VM, then change iops
         self.custom_iops_disk_offering = DiskOffering.create(
             
         )'''
 
-    def _check_list(self, in_list, expected_size_of_list, err_msg):
-        self.assertEqual(
-            isinstance(in_list, list),
-            True,
-            "'in_list' is not a list."
-        )
-
-        self.assertEqual(
-            len(in_list),
-            expected_size_of_list,
-            err_msg
-        )
-
-    def _check_iscsi_name(self, sf_iscsi_name):
-        self.assertEqual(
-            sf_iscsi_name[0],
-            "/",
-            "The iSCSI name needs to start with a forward slash."
-        )
-
     def _check_volume(self, volume, volume_name):
         self.assertTrue(
             volume.name.startswith(volume_name),
@@ -1501,45 +1445,13 @@ class TestVolumes(cloudstackTestCase):
             "The storage type is incorrect."
         )
 
-    def _check_size_and_iops(self, sf_volume, volume, size):
-        self.assertEqual(
-            sf_volume['qos']['minIOPS'],
-            volume.miniops,
-            "Check QOS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
-        )
-
-        self.assertEqual(
-            sf_volume['qos']['maxIOPS'],
-            volume.maxiops,
-            "Check QOS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
-        )
-
-        self.assertEqual(
-            sf_volume['totalSize'],
-            size,
-            "Check SF volume size: " + str(sf_volume['totalSize'])
-        )
-
-    def _check_vag(self, sf_volume, sf_vag_id):
-        self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
-            1,
-            "The volume should only be in one VAG."
-        )
-
-        self.assertEqual(
-            sf_volume['volumeAccessGroups'][0],
-            sf_vag_id,
-            "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
-        )
-
     def _check_and_get_cs_volume(self, volume_id, volume_name):
         list_volumes_response = list_volumes(
             self.apiClient,
             id=volume_id
         )
 
-        self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg)
 
         cs_volume = list_volumes_response[0]
 
@@ -1547,108 +1459,37 @@ class TestVolumes(cloudstackTestCase):
 
         return cs_volume
 
-    def _get_sf_account_id(self, primary_storage_id, account_id):
-        sf_account_id_request = {'storageid': primary_storage_id, 'accountid': account_id}
-        sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request)
-        sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
-
-        self.assertEqual(
-            isinstance(sf_account_id, int),
-            True,
-            TestVolumes._sf_account_id_should_be_non_zero_int_err_msg
-        )
-
-        return sf_account_id
-
-    def _get_volume_size_with_hsr(self, cs_volume):
-        # Get underlying SF volume size with hypervisor snapshot reserve
-        sf_volume_size_request = {'volumeid': cs_volume.id}
-        sf_volume_size_result = self.cs_api.getSolidFireVolumeSize(sf_volume_size_request)
-        sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']
-
-        self.assertEqual(
-            isinstance(sf_volume_size, int),
-            True,
-            "The SolidFire volume size should be a non-zero integer."
-        )
-
-        return sf_volume_size
-
-    def _get_vag_id(self):
-        # Get SF Volume Access Group ID
-        sf_vag_id_request = {'clusterid': self.cluster.id, 'storageid': self.primary_storage.id}
-        sf_vag_id_result = self.cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
-        sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']
+    def _verify_hsr(self, cs_volume_size_in_gb, hsr, sf_volume_size_in_bytes):
+        cs_volume_size_including_hsr_in_bytes = self._get_cs_volume_size_including_hsr_in_bytes(cs_volume_size_in_gb, hsr)
 
-        self.assertEqual(
-            isinstance(sf_vag_id, int),
-            True,
-            TestVolumes._vag_id_should_be_non_zero_int_err_msg
-        )
+        self.assertTrue(
+            cs_volume_size_including_hsr_in_bytes == sf_volume_size_in_bytes,
+            "HSR does not add up correctly."
+        );
 
-        return sf_vag_id
+    def _get_cs_volume_size_including_hsr_in_bytes(self, cs_volume_size_in_gb, hsr):
+        lowest_hsr = 10
 
-    def _get_iqn(self, volume):
-        # Get volume IQN
-        sf_iscsi_name_request = {'volumeid': volume.id}
-        sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request)
-        sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
+        if hsr < lowest_hsr:
+            hsr = lowest_hsr;
 
-        self._check_iscsi_name(sf_iscsi_name)
+        return self._get_bytes_from_gb(cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100)))
 
-        return sf_iscsi_name
+    def _get_bytes_from_gb(self, number_in_gb):
+        return number_in_gb * 1024 * 1024 * 1024
 
     def _get_vm(self, vm_id):
         list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
 
-        self._check_list(list_vms_response, 1, TestVolumes._should_only_be_one_vm_in_list_err_msg)
+        sf_util.check_list(list_vms_response, 1, self, TestVolumes._should_only_be_one_vm_in_list_err_msg)
 
         return list_vms_response[0]
 
-    def _check_and_get_sf_volume(self, sf_volumes, sf_volume_name, should_exist=True):
-        sf_volume = None
-
-        for volume in sf_volumes:
-            if volume['name'] == sf_volume_name:
-                sf_volume = volume
-                break
-
-        if should_exist:
-            self.assertNotEqual(
-                sf_volume,
-                None,
-                "Check if SF volume was created in correct account: " + str(sf_volumes)
-            )
-        else:
-            self.assertEqual(
-                sf_volume,
-                None,
-                "Check if SF volume was deleted: " + str(sf_volumes)
-            )
-
-        return sf_volume
-
     def _check_xen_sr(self, xen_sr_name, should_exist=True):
-        if should_exist:
-            xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)[0]
-
-            self.sr_shared = self.xen_session.xenapi.SR.get_shared(xen_sr)
-
-            self.assertEqual(
-                self.sr_shared,
-                True,
-                TestVolumes._sr_not_shared_err_msg
-            )
-        else:
-            xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)
+        sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist)
 
-            self._check_list(xen_sr, 0, TestVolumes._list_should_be_empty)
-
-    def _get_sf_volumes(self, sf_account_id=None):
-        if sf_account_id is not None:
-            sf_volumes = self.sf_client.list_volumes_for_account(sf_account_id)
-        else:
-            sf_volumes = self.sf_client.list_active_volumes()
+    def _get_active_sf_volumes(self, sf_account_id=None):
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
         self.assertNotEqual(
             len(sf_volumes),
@@ -1657,20 +1498,3 @@ class TestVolumes(cloudstackTestCase):
         )
 
         return sf_volumes
-
-    @classmethod
-    def _set_supports_resign(cls):
-        supports_resign = str(cls.supports_resign)
-
-        sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'"
-
-        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
-        cls.dbConnection.execute(sql_query)
-
-    @classmethod
-    def _purge_solidfire_volumes(cls):
-        deleted_volumes = cls.sf_client.list_deleted_volumes()
-
-        for deleted_volume in deleted_volumes:
-            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
-
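
TestVolumes.py above also gains a _verify_hsr check that compares the size reported by getSolidFireVolumeSize against the disk offering size plus the hypervisor snapshot reserve (HSR), with the reserve floored at 10 percent. A standalone restatement of that arithmetic for reference (the function bodies mirror the new helpers in this diff; the 128 GB / 200 percent values are illustrative only):

def get_bytes_from_gb(number_in_gb):
    return number_in_gb * 1024 * 1024 * 1024


def get_cs_volume_size_including_hsr_in_bytes(cs_volume_size_in_gb, hsr):
    # Mirrors TestVolumes._get_cs_volume_size_including_hsr_in_bytes:
    # a hypervisor snapshot reserve below 10 percent is raised to 10 percent.
    lowest_hsr = 10

    if hsr < lowest_hsr:
        hsr = lowest_hsr

    return get_bytes_from_gb(cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100)))


# Example: a 128 GB offering with hypervisorsnapshotreserve = 200 should map
# to a 384 GB SolidFire volume (128 GB plus 200 percent of 128 GB).
assert get_cs_volume_size_including_hsr_in_bytes(128, 200) == get_bytes_from_gb(384)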

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/util/sf_util.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/util/sf_util.py b/test/integration/plugins/solidfire/util/sf_util.py
new file mode 100644
index 0000000..6629571
--- /dev/null
+++ b/test/integration/plugins/solidfire/util/sf_util.py
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
+    obj_assert.assertEqual(
+        isinstance(in_list, list),
+        True,
+        "'in_list' is not a list."
+    )
+
+    obj_assert.assertEqual(
+        len(in_list),
+        expected_size_of_list,
+        err_msg
+    )
+
+def get_sf_account_id(cs_api, cs_account_id, primary_storage_id, obj_assert, err_msg):
+    sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
+    sf_account_id_result = cs_api.getSolidFireAccountId(sf_account_id_request)
+    sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
+
+    obj_assert.assertEqual(
+        isinstance(sf_account_id, int),
+        True,
+        err_msg
+    )
+
+    return sf_account_id
+
+def get_iqn(cs_api, volume, obj_assert):
+    # Get volume IQN
+    sf_iscsi_name_request = {'volumeid': volume.id}
+    sf_iscsi_name_result = cs_api.getVolumeiScsiName(sf_iscsi_name_request)
+    sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
+
+    check_iscsi_name(sf_iscsi_name, obj_assert)
+
+    return sf_iscsi_name
+
+def check_iscsi_name(sf_iscsi_name, obj_assert):
+    obj_assert.assertEqual(
+        sf_iscsi_name[0],
+        "/",
+        "The iSCSI name needs to start with a forward slash."
+    )
+
+def set_supports_resign(supports_resign, db_connection):
+    _set_supports_resign_for_table(supports_resign, db_connection, "host_details")
+    _set_supports_resign_for_table(supports_resign, db_connection, "cluster_details")
+
+def _set_supports_resign_for_table(supports_resign, db_connection, table):
+    sql_query = "Update " + str(table) + " Set value = '" + str(supports_resign) + "' Where name = 'supportsResign'"
+
+    # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+    db_connection.execute(sql_query)
+
+def purge_solidfire_volumes(sf_client):
+    deleted_volumes = sf_client.list_deleted_volumes()
+
+    for deleted_volume in deleted_volumes:
+        sf_client.purge_deleted_volume(deleted_volume['volumeID'])
+
+def get_not_active_sf_volumes(sf_client, sf_account_id=None):
+    if sf_account_id is not None:
+        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+
+        if sf_volumes is not None and len(sf_volumes) > 0:
+            sf_volumes = _get_not_active_sf_volumes_only(sf_volumes)
+    else:
+        sf_volumes = sf_client.list_deleted_volumes()
+
+    return sf_volumes
+
+def _get_not_active_sf_volumes_only(sf_volumes):
+    not_active_sf_volumes_only = []
+
+    for sf_volume in sf_volumes:
+        if sf_volume["status"] != "active":
+            not_active_sf_volumes_only.append(sf_volume)
+
+    return not_active_sf_volumes_only
+
+def get_active_sf_volumes(sf_client, sf_account_id=None):
+    if sf_account_id is not None:
+        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+
+        if sf_volumes is not None and len(sf_volumes) > 0:
+            sf_volumes = _get_active_sf_volumes_only(sf_volumes)
+    else:
+        sf_volumes = sf_client.list_active_volumes()
+
+    return sf_volumes
+
+def _get_active_sf_volumes_only(sf_volumes):
+    active_sf_volumes_only = []
+
+    for sf_volume in sf_volumes:
+        if sf_volume["status"] == "active":
+            active_sf_volumes_only.append(sf_volume)
+
+    return active_sf_volumes_only
+
+def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist=True):
+    sf_volume = None
+
+    for volume in sf_volumes:
+        if volume['name'] == sf_volume_name:
+            sf_volume = volume
+            break
+
+    if should_exist:
+        obj_assert.assertNotEqual(
+            sf_volume,
+            None,
+            "Check if SF volume was created in correct account: " + str(sf_volumes)
+        )
+    else:
+        obj_assert.assertEqual(
+            sf_volume,
+            None,
+            "Check if SF volume was deleted: " + str(sf_volumes)
+        )
+
+    return sf_volume
+
+def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
+    xen_sr = xen_session.xenapi.SR.get_by_name_label(xen_sr_name)
+
+    if should_exist:
+        check_list(xen_sr, 1, obj_assert, "SR " + xen_sr_name + " doesn't exist, but should.")
+
+        sr_shared = xen_session.xenapi.SR.get_shared(xen_sr[0])
+
+        obj_assert.assertEqual(
+            sr_shared,
+            True,
+            "SR " + xen_sr_name + " is not shared, but should be."
+        )
+    else:
+        check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")
+
+def check_vag(sf_volume, sf_vag_id, obj_assert):
+    obj_assert.assertEqual(
+        len(sf_volume['volumeAccessGroups']),
+        1,
+        "The volume should only be in one VAG."
+    )
+
+    obj_assert.assertEqual(
+        sf_volume['volumeAccessGroups'][0],
+        sf_vag_id,
+        "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
+    )
+
+def get_vag_id(cs_api, cluster_id, primary_storage_id, obj_assert):
+    # Get SF Volume Access Group ID
+    sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id}
+    sf_vag_id_result = cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
+    sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']
+
+    obj_assert.assertEqual(
+        isinstance(sf_vag_id, int),
+        True,
+        "The SolidFire VAG ID should be a non-zero integer."
+    )
+
+    return sf_vag_id
+
+def format_iqn(iqn):
+    return "/" + iqn + "/0"
+
+def check_size_and_iops(sf_volume, cs_volume, size, obj_assert):
+    obj_assert.assertEqual(
+        sf_volume['qos']['minIOPS'],
+        cs_volume.miniops,
+        "Check QoS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
+    )
+
+    obj_assert.assertEqual(
+        sf_volume['qos']['maxIOPS'],
+        cs_volume.maxiops,
+        "Check QoS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
+    )
+
+    obj_assert.assertEqual(
+        sf_volume['totalSize'],
+        size,
+        "Check SolidFire volume size: " + str(sf_volume['totalSize'])
+    )
+
+def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):
+    # Get underlying SF volume size with hypervisor snapshot reserve
+    sf_volume_size_request = {'volumeid': cs_volume.id}
+    sf_volume_size_result = cs_api.getSolidFireVolumeSize(sf_volume_size_request)
+    sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']
+
+    obj_assert.assertEqual(
+        isinstance(sf_volume_size, int),
+        True,
+        "The SolidFire volume size should be a non-zero integer."
+    )
+
+    return sf_volume_size
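
As a usage note for the new sf_util module, format_iqn wraps a raw IQN into the leading-slash form that check_iscsi_name expects. A small sketch (not part of the commit), assuming it is run from test/integration/plugins/solidfire so that "from util import sf_util" resolves the same way it does in the test files above; the IQN value is made up, since in the tests it comes from the getVolumeiScsiName API call:

import unittest

from util import sf_util


class SfUtilExample(unittest.TestCase):
    def test_iqn_formatting(self):
        iqn = "iqn.2010-01.com.solidfire:abcd.test-volume.42"  # hypothetical IQN

        sf_iscsi_name = sf_util.format_iqn(iqn)

        # check_iscsi_name asserts that the name starts with a forward slash,
        # using this test case as its assertion object.
        sf_util.check_iscsi_name(sf_iscsi_name, self)

        self.assertEqual(sf_iscsi_name, "/" + iqn + "/0")


if __name__ == "__main__":
    unittest.main()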

