From: talluri@apache.org
To: commits@cloudstack.apache.org
Date: Thu, 05 Feb 2015 12:06:26 -0000
Subject: [3/5] git commit: updated refs/heads/4.5 to 9658569

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/76272743/test/integration/component/test_snapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_snapshots.py b/test/integration/component/test_snapshots.py
index 4bb2dcf..5b8cdc7 100644
--- a/test/integration/component/test_snapshots.py
+++ b/test/integration/component/test_snapshots.py
@@ -16,142 +16,143 @@
 # under the License.
""" P1 tests for Snapshots """ -#Import Local Modules -from nose.plugins.attrib import attr -from marvin.cloudstackTestCase import cloudstackTestCase - -from marvin.lib.base import (Snapshot, - Template, - VirtualMachine, - Account, - ServiceOffering, - DiskOffering, - Volume) - -from marvin.lib.common import (get_domain, - get_zone, - get_template, - list_events, - list_volumes, - list_snapshots, - list_templates, - list_virtual_machines, - ) - -from marvin.lib.utils import (cleanup_resources, - format_volume_to_ext3, - random_gen, - is_snapshot_on_nfs, - get_hypervisor_type) - -from marvin.cloudstackAPI import detachVolume -import time +# Import Local Modules +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase + +from marvin.lib.base import (Snapshot, + Template, + VirtualMachine, + Account, + ServiceOffering, + DiskOffering, + Volume) + +from marvin.lib.common import (get_domain, + get_zone, + get_template, + list_events, + list_volumes, + list_snapshots, + list_templates, + list_virtual_machines, + ) +from marvin.lib.utils import (cleanup_resources, + format_volume_to_ext3, + random_gen, + is_snapshot_on_nfs, + get_hypervisor_type) + +from marvin.cloudstackAPI import detachVolume +import time class Services: + """Test Snapshots Services """ def __init__(self): self.services = { - "account": { - "email": "test@test.com", - "firstname": "Test", - "lastname": "User", - "username": "test", - # Random characters are appended for unique - # username - "password": "password", - }, - "service_offering": { - "name": "Tiny Instance", - "displaytext": "Tiny Instance", - "cpunumber": 1, - "cpuspeed": 200, # in MHz + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 200, # in MHz "memory": 256, # In MBs + }, + "disk_offering": { + "displaytext": "Small Disk", + "name": "Small Disk", + "disksize": 1 + }, + "server_with_disk": + { + "displayname": "Test VM -With Disk", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + + "server_without_disk": + { + "displayname": "Test VM-No Disk", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + # For NAT rule creation + "publicport": 22, + "protocol": 'TCP', + }, + "server": { + "displayname": "TestVM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "recurring_snapshot": { + "intervaltype": 'HOURLY', + # Frequency of snapshots + "maxsnaps": 1, # Should be min 2 + "schedule": 1, + "timezone": 'US/Arizona', + # Timezone Formats - + # http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack + }, + "templates": { + "displaytext": 'Template', + "name": 'Template', + "ostype": "CentOS 5.3 (64-bit)", + "templatefilter": 'self', + }, + "volume": { + "diskname": "APP Data Volume", + "size": 1, # in GBs + "xenserver": {"rootdiskdevice": "/dev/xvda", + "datadiskdevice_1": '/dev/xvdb', + "datadiskdevice_2": '/dev/xvdc', # Data Disk + }, + "kvm": {"rootdiskdevice": "/dev/vda", + "datadiskdevice_1": "/dev/vdb", + "datadiskdevice_2": "/dev/vdc" }, - 
"disk_offering": { - "displaytext": "Small Disk", - "name": "Small Disk", - "disksize": 1 - }, - "server_with_disk": - { - "displayname": "Test VM -With Disk", - "username": "root", - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - "privateport": 22, - "publicport": 22, - "protocol": 'TCP', - }, - - "server_without_disk": - { - "displayname": "Test VM-No Disk", - "username": "root", - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - "privateport": 22, - # For NAT rule creation - "publicport": 22, - "protocol": 'TCP', - }, - "server": { - "displayname": "TestVM", - "username": "root", - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - "privateport": 22, - "publicport": 22, - "protocol": 'TCP', - }, - "recurring_snapshot": { - "intervaltype": 'HOURLY', - # Frequency of snapshots - "maxsnaps": 1, # Should be min 2 - "schedule": 1, - "timezone": 'US/Arizona', - # Timezone Formats - http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack - }, - "templates": { - "displaytext": 'Template', - "name": 'Template', - "ostype": "CentOS 5.3 (64-bit)", - "templatefilter": 'self', - }, - "volume": { - "diskname": "APP Data Volume", - "size": 1, # in GBs - "xenserver": {"rootdiskdevice":"/dev/xvda", - "datadiskdevice_1": '/dev/xvdb', - "datadiskdevice_2": '/dev/xvdc', # Data Disk - }, - "kvm": {"rootdiskdevice": "/dev/vda", - "datadiskdevice_1": "/dev/vdb", - "datadiskdevice_2": "/dev/vdc" - }, - "vmware": {"rootdiskdevice": "/dev/hda", - "datadiskdevice_1": "/dev/hdb", - "datadiskdevice_2": "/dev/hdc" - } - }, - "paths": { - "mount_dir": "/mnt/tmp", - "sub_dir": "test", - "sub_lvl_dir1": "test1", - "sub_lvl_dir2": "test2", - "random_data": "random.data", - }, - "ostype": "CentOS 5.3 (64-bit)", - # Cent OS 5.3 (64 bit) - "sleep": 60, - "timeout": 10, - } + "vmware": {"rootdiskdevice": "/dev/hda", + "datadiskdevice_1": "/dev/hdb", + "datadiskdevice_2": "/dev/hdc" + } + }, + "paths": { + "mount_dir": "/mnt/tmp", + "sub_dir": "test", + "sub_lvl_dir1": "test1", + "sub_lvl_dir2": "test2", + "random_data": "random.data", + }, + "ostype": "CentOS 5.3 (64-bit)", + # Cent OS 5.3 (64 bit) + "sleep": 60, + "timeout": 10, + } class TestSnapshots(cloudstackTestCase): @@ -167,17 +168,18 @@ class TestSnapshots(cloudstackTestCase): cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.disk_offering = DiskOffering.create( - cls.api_client, - cls.services["disk_offering"] - ) + cls.api_client, + cls.services["disk_offering"] + ) cls.template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) cls.services["domainid"] = cls.domain.id - cls.services["volume"]["zoneid"] = cls.services["server_with_disk"]["zoneid"] = cls.zone.id + cls.services["volume"]["zoneid"] = cls.services[ + "server_with_disk"]["zoneid"] = cls.zone.id cls.services["server_with_disk"]["diskoffering"] = cls.disk_offering.id cls.services["server_without_disk"]["zoneid"] = cls.zone.id @@ -187,23 +189,23 @@ class TestSnapshots(cloudstackTestCase): cls.services["diskoffering"] = cls.disk_offering.id cls.service_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offering"] - ) + cls.api_client, + cls.services["service_offering"] + ) # Get Hypervisor Type cls.hypervisor = (get_hypervisor_type(cls.api_client)).lower() cls._cleanup = [ - cls.service_offering, - cls.disk_offering - ] + 
cls.service_offering, + cls.disk_offering + ] return @classmethod def tearDownClass(cls): try: - #Cleanup resources used + # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @@ -216,92 +218,105 @@ class TestSnapshots(cloudstackTestCase): # Create VMs, NAT Rules etc self.account = Account.create( - self.apiclient, - self.services["account"], - domainid=self.domain.id - ) + self.apiclient, + self.services["account"], + domainid=self.domain.id + ) self.virtual_machine = self.virtual_machine_with_disk = \ - VirtualMachine.create( - self.api_client, - self.services["server_with_disk"], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - mode=self.services["mode"] - ) + VirtualMachine.create( + self.api_client, + self.services["server_with_disk"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + mode=self.services["mode"] + ) self.cleanup = [self.account, ] return def tearDown(self): try: - #Clean up, terminate the created instance, volumes and snapshots + # Clean up, terminate the created instance, volumes and snapshots cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(speed = "slow") + @attr(speed="slow") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_02_snapshot_data_disk(self): """Test Snapshot Data Disk """ volume = list_volumes( - self.apiclient, - virtualmachineid=self.virtual_machine_with_disk.id, - type='DATADISK', - listall=True - ) + self.apiclient, + virtualmachineid=self.virtual_machine_with_disk.id, + type='DATADISK', + listall=True + ) self.assertEqual( - isinstance(volume, list), - True, - "Check list response returns a valid list" - ) + isinstance(volume, list), + True, + "Check list response returns a valid list" + ) self.debug("Creating a Snapshot from data volume: %s" % volume[0].id) snapshot = Snapshot.create( - self.apiclient, - volume[0].id, - account=self.account.name, - domainid=self.account.domainid - ) + self.apiclient, + volume[0].id, + account=self.account.name, + domainid=self.account.domainid + ) snapshots = list_snapshots( - self.apiclient, - id=snapshot.id - ) + self.apiclient, + id=snapshot.id + ) self.assertEqual( - isinstance(snapshots, list), - True, - "Check list response returns a valid list" - ) + isinstance(snapshots, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - snapshots, - None, - "Check if result exists in list item call" - ) + snapshots, + None, + "Check if result exists in list item call" + ) self.assertEqual( - snapshots[0].id, - snapshot.id, - "Check resource id in list resources call" - ) - self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) + snapshots[0].id, + snapshot.id, + "Check resource id in list resources call" + ) + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snapshot.id)) return - @attr(speed = "slow") - @attr(tags=["advanced", "advancedns", "basic", "sg"], required_hardware="true") + @attr(speed="slow") + @attr( + tags=[ + "advanced", + "advancedns", + "basic", + "sg"], + required_hardware="true") def test_01_volume_from_snapshot(self): """Test Creating 
snapshot from volume having spaces in name(KVM) """ # Validate the following - #1. Create a virtual machine and data volume - #2. Attach data volume to VM - #3. Login to machine; create temp/test directories on data volume and write some random data - #4. Snapshot the Volume - #5. Create another Volume from snapshot - #6. Mount/Attach volume to another virtual machine - #7. Compare data, data should match + # 1. Create a virtual machine and data volume + # 2. Attach data volume to VM + # 3. Login to machine; create temp/test directories on data volume + # and write some random data + # 4. Snapshot the Volume + # 5. Create another Volume from snapshot + # 6. Mount/Attach volume to another virtual machine + # 7. Compare data, data should match random_data_0 = random_gen(size=100) random_data_1 = random_gen(size=100) @@ -316,63 +331,60 @@ class TestSnapshots(cloudstackTestCase): self.virtual_machine.ipaddress) volume = Volume.create( - self.apiclient, - self.services["volume"], - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid, - diskofferingid=self.disk_offering.id - ) + self.apiclient, + self.services["volume"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering.id + ) self.debug("Created volume with ID: %s" % volume.id) self.virtual_machine.attach_volume( - self.apiclient, - volume - ) + self.apiclient, + volume + ) self.debug("Attach volume: %s to VM: %s" % - (volume.id, self.virtual_machine.id)) - + (volume.id, self.virtual_machine.id)) self.debug("Formatting volume: %s to ext3" % volume.id) - #Format partition using ext3 - # Note that this is the second data disk partition of virtual machine as it was already containing - # data disk before attaching the new volume, Hence datadiskdevice_2 + # Format partition using ext3 + # Note that this is the second data disk partition of virtual machine + # as it was already containing data disk before attaching the new + # volume, Hence datadiskdevice_2 format_volume_to_ext3( - ssh_client, - self.services["volume"][self.hypervisor]["datadiskdevice_2"] - ) - cmds = [ "fdisk -l", - "mkdir -p %s" % self.services["paths"]["mount_dir"], - "mount -t ext3 %s1 %s" % ( - self.services["volume"][self.hypervisor]["datadiskdevice_2"], - self.services["paths"]["mount_dir"] - ), - "mkdir -p %s/%s/{%s,%s} " % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["sub_lvl_dir2"] - ), - "echo %s > %s/%s/%s/%s" % ( - random_data_0, - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["random_data"] - ), - "echo %s > %s/%s/%s/%s" % ( - random_data_1, - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir2"], - self.services["paths"]["random_data"] - ), - "cat %s/%s/%s/%s" % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["random_data"] - ) - ] + ssh_client, + self.services["volume"][self.hypervisor]["datadiskdevice_2"] + ) + cmds = [ + "fdisk -l", + "mkdir -p %s" % + self.services["paths"]["mount_dir"], + "mount -t ext3 %s1 %s" % + (self.services["volume"][ + self.hypervisor]["datadiskdevice_2"], + self.services["paths"]["mount_dir"]), + "mkdir -p %s/%s/{%s,%s} " % + (self.services["paths"]["mount_dir"], + 
self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["sub_lvl_dir2"]), + "echo %s > %s/%s/%s/%s" % + (random_data_0, + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["random_data"]), + "echo %s > %s/%s/%s/%s" % + (random_data_1, + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir2"], + self.services["paths"]["random_data"]), + "cat %s/%s/%s/%s" % + (self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["random_data"])] for c in cmds: self.debug("Command: %s" % c) result = ssh_client.execute(c) @@ -380,95 +392,95 @@ class TestSnapshots(cloudstackTestCase): # Unmount the Sec Storage cmds = [ - "umount %s" % (self.services["paths"]["mount_dir"]), - ] + "umount %s" % (self.services["paths"]["mount_dir"]), + ] for c in cmds: self.debug("Command: %s" % c) ssh_client.execute(c) list_volume_response = Volume.list( - self.apiclient, - virtualmachineid=self.virtual_machine.id, - type='DATADISK', - id=volume.id - ) + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='DATADISK', + id=volume.id + ) self.assertEqual( - isinstance(list_volume_response, list), - True, - "Check list volume response for valid data" - ) + isinstance(list_volume_response, list), + True, + "Check list volume response for valid data" + ) volume_response = list_volume_response[0] - #Create snapshot from attached volume + # Create snapshot from attached volume snapshot = Snapshot.create( - self.apiclient, - volume_response.id, - account=self.account.name, - domainid=self.account.domainid - ) + self.apiclient, + volume_response.id, + account=self.account.name, + domainid=self.account.domainid + ) self.debug("Created snapshot: %s" % snapshot.id) - #Create volume from snapshot + # Create volume from snapshot volume_from_snapshot = Volume.create_from_snapshot( - self.apiclient, - snapshot.id, - self.services["volume"], - account=self.account.name, - domainid=self.account.domainid - ) + self.apiclient, + snapshot.id, + self.services["volume"], + account=self.account.name, + domainid=self.account.domainid + ) # Detach the volume from virtual machine self.virtual_machine.detach_volume( - self.apiclient, - volume - ) + self.apiclient, + volume + ) self.debug("Detached volume: %s from VM: %s" % - (volume.id, self.virtual_machine.id)) + (volume.id, self.virtual_machine.id)) self.debug("Created Volume: %s from Snapshot: %s" % ( - volume_from_snapshot.id, - snapshot.id)) + volume_from_snapshot.id, + snapshot.id)) volumes = Volume.list( - self.apiclient, - id=volume_from_snapshot.id - ) + self.apiclient, + id=volume_from_snapshot.id + ) self.assertEqual( - isinstance(volumes, list), - True, - "Check list response returns a valid list" - ) + isinstance(volumes, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(volumes), - None, - "Check Volume list Length" - ) + len(volumes), + None, + "Check Volume list Length" + ) self.assertEqual( - volumes[0].id, - volume_from_snapshot.id, - "Check Volume in the List Volumes" - ) - #Attaching volume to new VM + volumes[0].id, + volume_from_snapshot.id, + "Check Volume in the List Volumes" + ) + # Attaching volume to new VM new_virtual_machine = VirtualMachine.create( - self.apiclient, - self.services["server_without_disk"], - templateid=self.template.id, - 
accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - mode=self.services["mode"] - ) + self.apiclient, + self.services["server_without_disk"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + mode=self.services["mode"] + ) self.debug("Deployed new VM for account: %s" % self.account.name) - #self.cleanup.append(new_virtual_machine) + # self.cleanup.append(new_virtual_machine) self.debug("Attaching volume: %s to VM: %s" % ( - volume_from_snapshot.id, - new_virtual_machine.id - )) + volume_from_snapshot.id, + new_virtual_machine.id + )) new_virtual_machine.attach_volume( - self.apiclient, - volume_from_snapshot - ) + self.apiclient, + volume_from_snapshot + ) # Rebooting is required so that newly attached disks are detected self.debug("Rebooting : %s" % new_virtual_machine.id) @@ -476,17 +488,20 @@ class TestSnapshots(cloudstackTestCase): new_virtual_machine.reboot(self.apiclient) try: - #Login to VM to verify test directories and files + # Login to VM to verify test directories and files ssh = new_virtual_machine.get_ssh_client() - # Mount datadiskdevice_1 because this is the first data disk of the new virtual machine - cmds = ["fdisk -l", - "mkdir -p %s" % self.services["paths"]["mount_dir"], - "mount -t ext3 %s1 %s" % ( - self.services["volume"][self.hypervisor]["datadiskdevice_1"], - self.services["paths"]["mount_dir"] - ), - ] + # Mount datadiskdevice_1 because this is the first data disk of the + # new virtual machine + cmds = [ + "fdisk -l", + "mkdir -p %s" % + self.services["paths"]["mount_dir"], + "mount -t ext3 %s1 %s" % + (self.services["volume"][ + self.hypervisor]["datadiskdevice_1"], + self.services["paths"]["mount_dir"]), + ] for c in cmds: self.debug("Command: %s" % c) @@ -494,107 +509,119 @@ class TestSnapshots(cloudstackTestCase): self.debug(result) returned_data_0 = ssh.execute( - "cat %s/%s/%s/%s" % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["random_data"] - )) + "cat %s/%s/%s/%s" % ( + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["random_data"] + )) returned_data_1 = ssh.execute( - "cat %s/%s/%s/%s" % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir2"], - self.services["paths"]["random_data"] - )) + "cat %s/%s/%s/%s" % ( + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir2"], + self.services["paths"]["random_data"] + )) except Exception as e: self.fail("SSH access failed for VM: %s, Exception: %s" % - (new_virtual_machine.ipaddress, e)) + (new_virtual_machine.ipaddress, e)) self.debug("returned_data_0: %s" % returned_data_0[0]) self.debug("returned_data_1: %s" % returned_data_1[0]) - #Verify returned data + # Verify returned data self.assertEqual( - random_data_0, - returned_data_0[0], - "Verify newly attached volume contents with existing one" - ) + random_data_0, + returned_data_0[0], + "Verify newly attached volume contents with existing one" + ) self.assertEqual( - random_data_1, - returned_data_1[0], - "Verify newly attached volume contents with existing one" - ) + random_data_1, + returned_data_1[0], + "Verify newly attached volume contents with existing one" + ) # Unmount the Sec Storage cmds = 
[ - "umount %s" % (self.services["paths"]["mount_dir"]), - ] + "umount %s" % (self.services["paths"]["mount_dir"]), + ] for c in cmds: self.debug("Command: %s" % c) ssh_client.execute(c) return - @attr(speed = "slow") + @attr(speed="slow") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_04_delete_snapshot(self): """Test Delete Snapshot """ - #1. Snapshot the Volume - #2. Delete the snapshot - #3. Verify snapshot is removed by calling List Snapshots API - #4. Verify snapshot was removed from image store + # 1. Snapshot the Volume + # 2. Delete the snapshot + # 3. Verify snapshot is removed by calling List Snapshots API + # 4. Verify snapshot was removed from image store self.debug("Creating volume under account: %s" % self.account.name) volume = Volume.create( - self.apiclient, - self.services["volume"], - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid, - diskofferingid=self.disk_offering.id - ) + self.apiclient, + self.services["volume"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering.id + ) self.debug("Created volume: %s" % volume.id) self.debug("Attaching volume to vm: %s" % self.virtual_machine.id) self.virtual_machine.attach_volume( - self.apiclient, - volume - ) + self.apiclient, + volume + ) self.debug("Volume attached to vm") volumes = list_volumes( - self.apiclient, - virtualmachineid=self.virtual_machine.id, - type='DATADISK', - id=volume.id - ) + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='DATADISK', + id=volume.id + ) self.assertEqual( - isinstance(volumes, list), - True, - "Check list response returns a valid list" - ) + isinstance(volumes, list), + True, + "Check list response returns a valid list" + ) snapshot = Snapshot.create( - self.apiclient, - volumes[0].id, - account=self.account.name, - domainid=self.account.domainid - ) + self.apiclient, + volumes[0].id, + account=self.account.name, + domainid=self.account.domainid + ) snapshot.delete(self.apiclient) snapshots = list_snapshots( - self.apiclient, - id=snapshot.id - ) + self.apiclient, + id=snapshot.id + ) self.assertEqual( - snapshots, - None, - "Check if result exists in list item call" - ) - self.assertFalse(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) + snapshots, + None, + "Check if result exists in list item call" + ) + self.assertFalse( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snapshot.id)) return - @attr(speed = "slow") - @attr(tags=["advanced", "advancedns", "basic", "sg"], required_hardware="true") + @attr(speed="slow") + @attr( + tags=[ + "advanced", + "advancedns", + "basic", + "sg"], + required_hardware="true") def test_03_snapshot_detachedDisk(self): """Test snapshot from detached disk """ @@ -609,167 +636,174 @@ class TestSnapshots(cloudstackTestCase): # 6. 
verify backup_snap_id was non null in the `snapshots` table volumes = list_volumes( - self.apiclient, - virtualmachineid=self.virtual_machine.id, - type='DATADISK', - listall=True - ) + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='DATADISK', + listall=True + ) self.assertEqual( - isinstance(volumes, list), - True, - "Check list response returns a valid list" - ) + isinstance(volumes, list), + True, + "Check list response returns a valid list" + ) volume = volumes[0] random_data_0 = random_gen(size=100) random_data_1 = random_gen(size=100) try: ssh_client = self.virtual_machine.get_ssh_client() - #Format partition using ext3 + # Format partition using ext3 format_volume_to_ext3( - ssh_client, - self.services["volume"][self.hypervisor]["datadiskdevice_1"] - ) + ssh_client, + self.services["volume"][self.hypervisor]["datadiskdevice_1"] + ) cmds = [ - "mkdir -p %s" % self.services["paths"]["mount_dir"], - "mount %s1 %s" % ( - self.services["volume"][self.hypervisor]["datadiskdevice_1"], - self.services["paths"]["mount_dir"] - ), - "pushd %s" % self.services["paths"]["mount_dir"], - "mkdir -p %s/{%s,%s} " % ( - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["sub_lvl_dir2"] - ), - "echo %s > %s/%s/%s" % ( - random_data_0, - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["random_data"] - ), - "echo %s > %s/%s/%s" % ( - random_data_1, - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir2"], - self.services["paths"]["random_data"] - ), - "sync", - "umount %s" % (self.services["paths"]["mount_dir"]), - ] + "mkdir -p %s" % + self.services["paths"]["mount_dir"], + "mount %s1 %s" % + (self.services["volume"][ + self.hypervisor]["datadiskdevice_1"], + self.services["paths"]["mount_dir"]), + "pushd %s" % + self.services["paths"]["mount_dir"], + "mkdir -p %s/{%s,%s} " % + (self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["sub_lvl_dir2"]), + "echo %s > %s/%s/%s" % + (random_data_0, + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["random_data"]), + "echo %s > %s/%s/%s" % + (random_data_1, + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir2"], + self.services["paths"]["random_data"]), + "sync", + "umount %s" % + (self.services["paths"]["mount_dir"]), + ] for c in cmds: self.debug(ssh_client.execute(c)) - #detach volume from VM + # detach volume from VM cmd = detachVolume.detachVolumeCmd() cmd.id = volume.id self.apiclient.detachVolume(cmd) - #Create snapshot from detached volume + # Create snapshot from detached volume snapshot = Snapshot.create(self.apiclient, volume.id) volumes = list_volumes( - self.apiclient, - virtualmachineid=self.virtual_machine.id, - type='DATADISK', - listall=True - ) + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='DATADISK', + listall=True + ) self.assertEqual( - volumes, - None, - "Check Volume is detached" - ) + volumes, + None, + "Check Volume is detached" + ) # Verify the snapshot was created or not snapshots = list_snapshots( - self.apiclient, - id=snapshot.id - ) + self.apiclient, + id=snapshot.id + ) self.assertNotEqual( - snapshots, - None, - "Check if result exists in list snapshots call" - ) + snapshots, + None, + "Check if result exists in list snapshots call" + ) self.assertEqual( - snapshots[0].id, - snapshot.id, - "Check snapshot id in list resources call" - ) + 
snapshots[0].id, + snapshot.id, + "Check snapshot id in list resources call" + ) except Exception as e: self.fail("SSH failed for VM with IP: %s - %s" % - (self.virtual_machine.ssh_ip, e)) + (self.virtual_machine.ssh_ip, e)) qresultset = self.dbclient.execute( - "select id from snapshots where uuid = '%s';" \ - % snapshot.id - ) + "select id from snapshots where uuid = '%s';" + % snapshot.id + ) self.assertNotEqual( - len(qresultset), - 0, - "Check DB Query result set" - ) + len(qresultset), + 0, + "Check DB Query result set" + ) qresult = qresultset[0] self.assertNotEqual( - str(qresult[0]), - 'NULL', - "Check if backup_snap_id is not null" - ) + str(qresult[0]), + 'NULL', + "Check if backup_snap_id is not null" + ) return - @attr(speed = "slow") - @attr(tags=["advanced", "advancedns", "smoke", "xen"], required_hardware="true") + @attr(speed="slow") + @attr( + tags=[ + "advanced", + "advancedns", + "smoke", + "xen"], + required_hardware="true") def test_07_template_from_snapshot(self): """Create Template from snapshot """ - #1. Login to machine; create temp/test directories on data volume - #2. Snapshot the Volume - #3. Create Template from snapshot - #4. Deploy Virtual machine using this template - #5. Login to newly created virtual machine - #6. Compare data in the root disk with the one that was written on the volume, it should match + # 1. Login to machine; create temp/test directories on data volume + # 2. Snapshot the Volume + # 3. Create Template from snapshot + # 4. Deploy Virtual machine using this template + # 5. Login to newly created virtual machine + # 6. Compare data in the root disk with the one that was written on the + # volume, it should match userapiclient = self.testClient.getUserApiClient( - UserName=self.account.name, - DomainName=self.account.domain) + UserName=self.account.name, + DomainName=self.account.domain) random_data_0 = random_gen(size=100) random_data_1 = random_gen(size=100) try: - #Login to virtual machine + # Login to virtual machine ssh_client = self.virtual_machine.get_ssh_client() cmds = [ - "mkdir -p %s" % self.services["paths"]["mount_dir"], - "mount %s1 %s" % ( - self.services["volume"][self.hypervisor]["rootdiskdevice"], - self.services["paths"]["mount_dir"] - ), - "mkdir -p %s/%s/{%s,%s} " % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["sub_lvl_dir2"] - ), - "echo %s > %s/%s/%s/%s" % ( - random_data_0, - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - self.services["paths"]["random_data"] - ), - "echo %s > %s/%s/%s/%s" % ( - random_data_1, - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir2"], - self.services["paths"]["random_data"] - ), - "sync", - ] + "mkdir -p %s" % self.services["paths"]["mount_dir"], + "mount %s1 %s" % ( + self.services["volume"][self.hypervisor]["rootdiskdevice"], + self.services["paths"]["mount_dir"] + ), + "mkdir -p %s/%s/{%s,%s} " % ( + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["sub_lvl_dir2"] + ), + "echo %s > %s/%s/%s/%s" % ( + random_data_0, + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["random_data"] + ), + "echo %s > %s/%s/%s/%s" % ( + random_data_1, + self.services["paths"]["mount_dir"], + 
self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir2"], + self.services["paths"]["random_data"] + ), + "sync", + ] for c in cmds: self.debug(c) @@ -778,125 +812,124 @@ class TestSnapshots(cloudstackTestCase): except Exception as e: self.fail("SSH failed for VM with IP address: %s" % - self.virtual_machine.ipaddress) + self.virtual_machine.ipaddress) # Unmount the Volume cmds = [ - "umount %s" % (self.services["paths"]["mount_dir"]), - ] + "umount %s" % (self.services["paths"]["mount_dir"]), + ] for c in cmds: self.debug(c) ssh_client.execute(c) volumes = list_volumes( - userapiclient, - virtualmachineid=self.virtual_machine.id, - type='ROOT', - listall=True - ) + userapiclient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True + ) self.assertEqual( - isinstance(volumes, list), - True, - "Check list response returns a valid list" - ) + isinstance(volumes, list), + True, + "Check list response returns a valid list" + ) volume = volumes[0] - #Create a snapshot of volume + # Create a snapshot of volume snapshot = Snapshot.create( - userapiclient, - volume.id, - account=self.account.name, - domainid=self.account.domainid - ) + userapiclient, + volume.id, + account=self.account.name, + domainid=self.account.domainid + ) self.debug("Snapshot created from volume ID: %s" % volume.id) # Generate template from the snapshot template = Template.create_from_snapshot( - userapiclient, - snapshot, - self.services["templates"] - ) + userapiclient, + snapshot, + self.services["templates"] + ) self.cleanup.append(template) self.debug("Template created from snapshot ID: %s" % snapshot.id) # Verify created template templates = list_templates( - userapiclient, - templatefilter=\ - self.services["templates"]["templatefilter"], - id=template.id - ) + userapiclient, + templatefilter=self.services["templates"]["templatefilter"], + id=template.id + ) self.assertNotEqual( - templates, - None, - "Check if result exists in list item call" - ) + templates, + None, + "Check if result exists in list item call" + ) self.assertEqual( - templates[0].id, - template.id, - "Check new template id in list resources call" - ) + templates[0].id, + template.id, + "Check new template id in list resources call" + ) self.debug("Deploying new VM from template: %s" % template.id) # Deploy new virtual machine using template new_virtual_machine = VirtualMachine.create( - userapiclient, - self.services["server_without_disk"], - templateid=template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - mode=self.services["mode"] - ) + userapiclient, + self.services["server_without_disk"], + templateid=template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + mode=self.services["mode"] + ) try: - #Login to VM & mount directory + # Login to VM & mount directory ssh = new_virtual_machine.get_ssh_client() cmds = [ - "mkdir -p %s" % self.services["paths"]["mount_dir"], - "mount %s1 %s" % ( - self.services["volume"][self.hypervisor]["rootdiskdevice"], - self.services["paths"]["mount_dir"] - ) - ] + "mkdir -p %s" % self.services["paths"]["mount_dir"], + "mount %s1 %s" % ( + self.services["volume"][self.hypervisor]["rootdiskdevice"], + self.services["paths"]["mount_dir"] + ) + ] for c in cmds: ssh.execute(c) returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir1"], - 
self.services["paths"]["random_data"] - )) + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir1"], + self.services["paths"]["random_data"] + )) self.debug(returned_data_0) returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % ( - self.services["paths"]["mount_dir"], - self.services["paths"]["sub_dir"], - self.services["paths"]["sub_lvl_dir2"], - self.services["paths"]["random_data"] - )) + self.services["paths"]["mount_dir"], + self.services["paths"]["sub_dir"], + self.services["paths"]["sub_lvl_dir2"], + self.services["paths"]["random_data"] + )) self.debug(returned_data_1) except Exception as e: self.fail("SSH failed for VM with IP address: %s" % - new_virtual_machine.ipaddress) - #Verify returned data + new_virtual_machine.ipaddress) + # Verify returned data self.assertEqual( - random_data_0, - returned_data_0[0], - "Verify newly attached volume contents with existing one" - ) + random_data_0, + returned_data_0[0], + "Verify newly attached volume contents with existing one" + ) self.assertEqual( - random_data_1, - returned_data_1[0], - "Verify newly attached volume contents with existing one" - ) + random_data_1, + returned_data_1[0], + "Verify newly attached volume contents with existing one" + ) # Unmount the volume cmds = [ - "umount %s" % (self.services["paths"]["mount_dir"]), - ] + "umount %s" % (self.services["paths"]["mount_dir"]), + ] try: for c in cmds: self.debug(c) @@ -904,7 +937,7 @@ class TestSnapshots(cloudstackTestCase): except Exception as e: self.fail("SSH failed for VM with IP address: %s, Exception: %s" % - (new_virtual_machine.ipaddress, e)) + (new_virtual_machine.ipaddress, e)) return @@ -912,9 +945,11 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase): @classmethod def setUpClass(cls): - cls.testClient = super(TestCreateVMSnapshotTemplate, cls).getClsTestClient() + cls.testClient = super( + TestCreateVMSnapshotTemplate, + cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() - + cls.hypervisor = cls.testClient.getHypervisorInfo() cls.services = Services().services # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client) @@ -922,35 +957,34 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase): cls.services['mode'] = cls.zone.networktype cls.template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) cls.services["domainid"] = cls.domain.id cls.services["server"]["zoneid"] = cls.zone.id - # Create VMs, NAT Rules etc cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=cls.domain.id - ) + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) cls.service_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offering"] - ) + cls.api_client, + cls.services["service_offering"] + ) cls._cleanup = [ - cls.service_offering, - cls.account, - ] + cls.service_offering, + cls.account, + ] return @classmethod def tearDownClass(cls): try: - #Cleanup resources used + # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @@ -965,13 +999,13 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase): def tearDown(self): try: - #Clean up, terminate the created instance, volumes and snapshots + # Clean up, terminate the created instance, volumes and snapshots cleanup_resources(self.apiclient, self.cleanup) except Exception as 
e: raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(speed = "slow") + @attr(speed="slow") @attr(tags=["advanced", "advancedns"], required_hardware="true") def test_01_createVM_snapshotTemplate(self): """Test create VM, Snapshot and Template @@ -984,35 +1018,39 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase): # 4. Create a instance from above created template. # 5. listSnapshots should list the snapshot that was created. # 6. verify that secondary storage NFS share contains the reqd - # volume under /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid + # volume under /secondary/snapshots/$accountid/ + # $volumeid/$snapshot_uuid # 7. verify backup_snap_id was non null in the `snapshots` table # 8. listTemplates() should return the newly created Template, # and check for template state as READY" # 9. listVirtualMachines() command should return the deployed VM. # State of this VM should be Running. - #Create Virtual Machine + # Create Virtual Machine + + if self.hypervisor.lower() in ['hyperv']: + self.skipTest("Snapshots feature is not supported on Hyper-V") userapiclient = self.testClient.getUserApiClient( - UserName=self.account.name, - DomainName=self.account.domain) + UserName=self.account.name, + DomainName=self.account.domain) self.virtual_machine = VirtualMachine.create( - userapiclient, - self.services["server"], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id - ) + userapiclient, + self.services["server"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id + ) self.debug("Created VM with ID: %s" % self.virtual_machine.id) # Get the Root disk of VM volumes = list_volumes( - userapiclient, - virtualmachineid=self.virtual_machine.id, - type='ROOT', - listall=True - ) + userapiclient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True + ) volume = volumes[0] # Create a snapshot from the ROOTDISK @@ -1021,105 +1059,111 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase): self.cleanup.append(snapshot) snapshots = list_snapshots( - userapiclient, - id=snapshot.id - ) + userapiclient, + id=snapshot.id + ) self.assertEqual( - isinstance(snapshots, list), - True, - "Check list response returns a valid list" - ) + isinstance(snapshots, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - snapshots, - None, - "Check if result exists in list snapshots call" - ) + snapshots, + None, + "Check if result exists in list snapshots call" + ) self.assertEqual( - snapshots[0].id, - snapshot.id, - "Check snapshot id in list resources call" - ) - self.debug("select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \ - % snapshot.id) + snapshots[0].id, + snapshot.id, + "Check snapshot id in list resources call" + ) + self.debug( + "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" % + snapshot.id) snapshot_uuid = snapshot.id # Generate template from the snapshot template = Template.create_from_snapshot( - userapiclient, - snapshot, - self.services["templates"] - ) + userapiclient, + snapshot, + self.services["templates"] + ) self.debug("Created template from snapshot: %s" % template.id) self.cleanup.append(template) templates = list_templates( - userapiclient, - templatefilter=\ - self.services["templates"]["templatefilter"], - id=template.id - ) + userapiclient, + 
templatefilter=self.services["templates"]["templatefilter"], + id=template.id + ) self.assertNotEqual( - templates, - None, - "Check if result exists in list item call" - ) + templates, + None, + "Check if result exists in list item call" + ) self.assertEqual( - templates[0].isready, - True, - "Check new template state in list templates call" - ) + templates[0].isready, + True, + "Check new template state in list templates call" + ) # Deploy new virtual machine using template new_virtual_machine = VirtualMachine.create( - userapiclient, - self.services["server"], - templateid=template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id - ) + userapiclient, + self.services["server"], + templateid=template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id + ) self.debug("Created VM with ID: %s from template: %s" % ( - new_virtual_machine.id, - template.id - )) + new_virtual_machine.id, + template.id + )) self.cleanup.append(new_virtual_machine) # Newly deployed VM should be 'Running' virtual_machines = list_virtual_machines( - userapiclient, - id=new_virtual_machine.id, - account=self.account.name, - domainid=self.account.domainid - ) + userapiclient, + id=new_virtual_machine.id, + account=self.account.name, + domainid=self.account.domainid + ) self.assertEqual( - isinstance(virtual_machines, list), - True, - "Check list response returns a valid list" - ) + isinstance(virtual_machines, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(virtual_machines), - 0, - "Check list virtual machines response" - ) + len(virtual_machines), + 0, + "Check list virtual machines response" + ) for virtual_machine in virtual_machines: self.assertEqual( - virtual_machine.state, - 'Running', - "Check list VM response for Running state" - ) - self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot_uuid)) + virtual_machine.state, + 'Running', + "Check list VM response for Running state" + ) + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snapshot_uuid)) return + class TestSnapshotEvents(cloudstackTestCase): @classmethod def setUpClass(cls): cls.testClient = super(TestSnapshotEvents, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() - - + cls.hypervisor = cls.testClient.getHypervisorInfo() cls.services = Services().services # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client) @@ -1127,46 +1171,46 @@ class TestSnapshotEvents(cloudstackTestCase): cls.services['mode'] = cls.zone.networktype template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) cls.services["server"]["zoneid"] = cls.zone.id cls.services["template"] = template.id # Create VMs, NAT Rules etc cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=cls.domain.id - ) + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) cls.services["account"] = cls.account.name cls.service_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offering"] - ) + cls.api_client, + cls.services["service_offering"] + ) cls.virtual_machine = VirtualMachine.create( - cls.api_client, - cls.services["server"], - templateid=template.id, - accountid=cls.account.name, - domainid=cls.account.domainid, - 
serviceofferingid=cls.service_offering.id - ) + cls.api_client, + cls.services["server"], + templateid=template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id + ) cls._cleanup = [ - cls.service_offering, - cls.account, - ] + cls.service_offering, + cls.account, + ] return @classmethod def tearDownClass(cls): try: - #Cleanup resources used + # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @@ -1181,34 +1225,38 @@ class TestSnapshotEvents(cloudstackTestCase): def tearDown(self): try: - #Clean up, terminate the created instance, volumes and snapshots + # Clean up, terminate the created instance, volumes and snapshots cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(speed = "slow") + @attr(speed="slow") @attr(tags=["advanced", "advancedns"], required_hardware="false") def test_05_snapshot_events(self): """Test snapshot events """ # Validate the following - # 1. Perform snapshot on the root disk of this VM and check the events/alerts. + # 1. Perform snapshot on the root disk of this VM and + # check the events/alerts. # 2. delete the snapshots and check the events/alerts # 3. listEvents() shows created/deleted snapshot events + if self.hypervisor.lower() in ['hyperv']: + self.skipTest("Snapshots feature is not supported on Hyper-V") + # Get the Root disk of VM volumes = list_volumes( - self.apiclient, - virtualmachineid=self.virtual_machine.id, - type='ROOT', - listall=True - ) + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True + ) self.assertEqual( - isinstance(volumes, list), - True, - "Check list response returns a valid list" - ) + isinstance(volumes, list), + True, + "Check list response returns a valid list" + ) volume = volumes[0] # Create a snapshot from the ROOTDISK @@ -1216,47 +1264,47 @@ class TestSnapshotEvents(cloudstackTestCase): self.debug("Snapshot created with ID: %s" % snapshot.id) snapshots = list_snapshots( - self.apiclient, - id=snapshot.id - ) + self.apiclient, + id=snapshot.id + ) self.assertEqual( - isinstance(snapshots, list), - True, - "Check list response returns a valid list" - ) + isinstance(snapshots, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - snapshots, - None, - "Check if result exists in list snapshots call" - ) + snapshots, + None, + "Check if result exists in list snapshots call" + ) self.assertEqual( - snapshots[0].id, - snapshot.id, - "Check snapshot id in list resources call" - ) + snapshots[0].id, + snapshot.id, + "Check snapshot id in list resources call" + ) snapshot.delete(self.apiclient) # Sleep to ensure that snapshot is deleted properly time.sleep(self.services["sleep"]) events = list_events( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - type='SNAPSHOT.DELETE' - ) + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + type='SNAPSHOT.DELETE' + ) self.assertEqual( - isinstance(events, list), - True, - "Check list response returns a valid list" - ) + isinstance(events, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - events, - None, - "Check if event exists in list events call" - ) + events, + None, + "Check if event exists in list events call" + ) self.assertIn( - events[0].state, - ['Completed', 
'Scheduled'], - "Check events state in list events call" - ) + events[0].state, + ['Completed', 'Scheduled'], + "Check events state in list events call" + ) return http://git-wip-us.apache.org/repos/asf/cloudstack/blob/76272743/test/integration/component/test_ss_limits.py ---------------------------------------------------------------------- diff --git a/test/integration/component/test_ss_limits.py b/test/integration/component/test_ss_limits.py index d5ee063..32201d8 100644 --- a/test/integration/component/test_ss_limits.py +++ b/test/integration/component/test_ss_limits.py @@ -58,6 +58,7 @@ class TestSecondaryStorageLimits(cloudstackTestCase): cloudstackTestClient = super(TestSecondaryStorageLimits, cls).getClsTestClient() cls.api_client = cloudstackTestClient.getApiClient() + cls.hypervisor = cloudstackTestClient.getHypervisorInfo() # Fill services from the external config file cls.services = cloudstackTestClient.getParsedTestDataConfig() # Get Zone, Domain and templates @@ -205,6 +206,9 @@ class TestSecondaryStorageLimits(cloudstackTestCase): 5. Verify that the secondary storage count of the account equals the size of the template""" + if self.hypervisor.lower() in ['hyperv']: + self.skipTest("Snapshots feature is not supported on Hyper-V") + response = self.setupAccount(value) self.assertEqual(response[0], PASS, response[1])
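
----------------------------------------------------------------------

For reference, the recurring functional change in this patch (beyond the autopep8-style re-indentation) is a hypervisor guard: setUpClass() now caches the hypervisor type via getHypervisorInfo(), and each snapshot test skips early on Hyper-V, where volume snapshots are not supported. The following is a minimal, hypothetical sketch of that pattern, not part of the commit itself; the class and test names are illustrative, and only Marvin calls that appear in the diff (getClsTestClient, getApiClient, getHypervisorInfo, skipTest) are used.

# Sketch only: illustrates the Hyper-V skip guard added by this patch.
from marvin.cloudstackTestCase import cloudstackTestCase


class TestSnapshotSkipExample(cloudstackTestCase):

    @classmethod
    def setUpClass(cls):
        cls.testClient = super(TestSnapshotSkipExample, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        # Hypervisor type is fetched once per test class, as the patch does.
        cls.hypervisor = cls.testClient.getHypervisorInfo()

    def test_example_snapshot(self):
        # Guard placed at the top of each snapshot test in the patch:
        # skip instead of failing on hypervisors without snapshot support.
        if self.hypervisor.lower() in ['hyperv']:
            self.skipTest("Snapshots feature is not supported on Hyper-V")
        # ... snapshot creation and assertions would follow here ...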