cloudstack-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From gauravaradhye <...@git.apache.org>
Subject [GitHub] cloudstack pull request: Disable enable zone pod cluster and host
Date Mon, 25 May 2015 06:41:42 GMT
Github user gauravaradhye commented on a diff in the pull request:

    https://github.com/apache/cloudstack/pull/285#discussion_r30964684
  
    --- Diff: test/integration/component/maint/testpath_disable_enable_zone.py ---
    @@ -429,71 +380,1093 @@ def test_01_disable_enable_zone(self):
                 zoneid=self.zone.id
             )
     
    -        self.assertNotEqual(user_vm_new,
    -                            None,
    +        self.assertNotEqual(user_vm_new.state,
    +                            RUNNING,
                                 "Verify that admin should create new VM")
     
    -        snap = Snapshot.create(
    +        Snapshot.create(
                 self.userapiclient,
                 root_volume[0].id)
     
    -        self.assertNotEqual(snap,
    -                            None,
    -                            "Verify that admin should snashot")
    -
             snapshots = list_snapshots(
                 self.userapiclient,
                 volumeid=root_volume[0].id,
                 listall=True)
     
    -        template_regis = Template.register(
    +        Template.register(
                 self.userapiclient,
                 self.testdata["privatetemplate"],
                 zoneid=self.zone.id)
     
    -        self.assertNotEqual(
    -            template_regis,
    -            None,
    -            "Check if template gets created"
    +        Volume.create(
    +            self.userapiclient,
    +            self.testdata["volume"],
    +            zoneid=self.zone.id,
    +            account=self.account.name,
    +            domainid=self.account.domainid,
    +            diskofferingid=self.disk_offering.id
             )
    -        self.assertNotEqual(
    -            template_from_snapshot,
    -            None,
    -            "Check if template gets created"
    +        Iso.create(
    +            self.userapiclient,
    +            self.testdata["iso2"],
    +            zoneid=self.zone.id,
    +            account=self.account.name,
    +            domainid=self.account.domainid,
    +        )
    +
    +        # Step 3
    +        # Deletion of zone should fail if vm,volume is present on the zone
    +        with self.assertRaises(Exception):
    +            self.zone.delete(self.apiclient)
    +
    +        return
    +
    +
    +class TestDisableEnablePod(cloudstackTestCase):
    +
    +    @classmethod
    +    def setUpClass(cls):
    +        testClient = super(TestDisableEnablePod, cls).getClsTestClient()
    +        cls.apiclient = testClient.getApiClient()
    +        cls.testdata = testClient.getParsedTestDataConfig()
    +        cls.hypervisor = cls.testClient.getHypervisorInfo()
    +
    +        # Get Zone, Domain and templates
    +        cls.domain = get_domain(cls.apiclient)
    +        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    +        cls.pod = get_pod(
    +            cls.apiclient,
    +            zone_id=cls.zone.id)
    +
    +        cls.template = get_template(
    +            cls.apiclient,
    +            cls.zone.id,
    +            cls.testdata["ostype"])
    +
    +        cls._cleanup = []
    +
    +        try:
    +            cls.service_offering = ServiceOffering.create(
    +                cls.apiclient,
    +                cls.testdata["service_offering"],
    +            )
    +            cls._cleanup.append(cls.service_offering)
    +
    +            cls.disk_offering = DiskOffering.create(
    +                cls.apiclient,
    +                cls.testdata["disk_offering"],
    +            )
    +            cls._cleanup.append(cls.disk_offering)
    +
    +            # Create an account
    +            cls.account = Account.create(
    +                cls.apiclient,
    +                cls.testdata["account"],
    +                domainid=cls.domain.id
    +            )
    +            cls._cleanup.append(cls.account)
    +
    +            # Create user api client of the account
    +            cls.userapiclient = testClient.getUserApiClient(
    +                UserName=cls.account.name,
    +                DomainName=cls.account.domain
    +            )
    +
    +        except Exception as e:
    +            cls.tearDownClass()
    +            raise e
    +        return
    +
    +    @classmethod
    +    def tearDownClass(cls):
    +        try:
    +            podList = Pod.list(cls.apiclient, id=cls.pod.id)
    +            if podList[0].allocationstate == DISABLED:
    +                cmd = updatePod.updatePodCmd()
    +                cmd.id = podList[0].id
    +                cmd.allocationstate = ENABLED
    +                cls.apiclient.updatePod(cmd)
    +
    +            cleanup_resources(cls.apiclient, cls._cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +
    +    def setUp(self):
    +        self.apiclient = self.testClient.getApiClient()
    +        self.dbclient = self.testClient.getDbConnection()
    +        self.cleanup = []
    +
    +    def tearDown(self):
    +        try:
    +            cleanup_resources(self.apiclient, self.cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +        return
    +
    +    @attr(tags=["advanced", "basic"], required_hardware="true")
    +    def test_01_disable_enable_pod(self):
    +        """disable enable Pod
    +            1. Disable pod and verify following things:
    +                For admin user:
    +                    -- Should be able to create new vm, snapshot,
    +                            volume,template,iso in the same pod
    +                For Non-admin user:
    +                    -- Should not be able to create new vm, snapshot,
    +                            volume,template,iso in the same pod
    +            2. Enable the above disabled pod and verify that:
    +                -All users should be able to create new vm, snapshot,
    +                volume,template,iso in the same pod
    +            3. Try to delete the pod and it should fail with error message:
    +                - "The pod is not deletable because there are servers
    +                running in this pod"
    +
    +        """
    +        # Step 1
    +        vm_user = VirtualMachine.create(
    +            self.userapiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id
    +        )
    +
    +        vm_root = VirtualMachine.create(
    +            self.apiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id
    +        )
    +
    +        cmd = updatePod.updatePodCmd()
    +        cmd.id = self.pod.id
    +        cmd.allocationstate = DISABLED
    +        self.apiclient.updatePod(cmd)
    +        podList = Pod.list(self.apiclient, id=self.pod.id)
    +
    +        self.assertEqual(podList[0].allocationstate,
    +                         DISABLED,
    +                         "Check if the pod is in disabled state"
    +                         )
    +        self.assertEqual(vm_user.state,
    +                         RUNNING,
    +                         "Verify that the user vm is running")
    +
    +        self.assertEqual(vm_root.state,
    +                         RUNNING,
    +                         "Verify that the admin vm is running")
    +
    +        VirtualMachine.create(
    +            self.apiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id,
             )
     
    -        data_volume = Volume.create(
    +        root_volume = list_volumes(
                 self.userapiclient,
    +            virtualmachineid=vm_root.id,
    +            type='ROOT',
    +            listall=True
    +        )
    +
    +        Snapshot.create(
    +            self.apiclient,
    +            root_volume[0].id)
    +
    +        snapshots = list_snapshots(
    +            self.apiclient,
    +            volumeid=root_volume[0].id,
    +            listall=True)
    +
    +        Template.create_from_snapshot(
    +            self.apiclient,
    +            snapshots[0],
    +            self.testdata["privatetemplate"])
    +
    +        builtin_info = get_builtin_template_info(self.apiclient, self.zone.id)
    +        self.testdata["privatetemplate"]["url"] = builtin_info[0]
    +        self.testdata["privatetemplate"]["hypervisor"] = builtin_info[1]
    +        self.testdata["privatetemplate"]["format"] = builtin_info[2]
    +
    +        Template.register(
    +            self.apiclient,
    +            self.testdata["privatetemplate"],
    +            zoneid=self.zone.id)
    +
    +        Volume.create(
    +            self.apiclient,
                 self.testdata["volume"],
                 zoneid=self.zone.id,
                 account=self.account.name,
                 domainid=self.account.domainid,
                 diskofferingid=self.disk_offering.id
             )
    -        self.assertNotEqual(
    -            data_volume,
    -            None,
    -            "Check if volume gets created"
    -        )
     
    -        ISO = Iso.create(
    -            self.userapiclient,
    +        Iso.create(
    +            self.apiclient,
                 self.testdata["iso2"],
                 zoneid=self.zone.id,
                 account=self.account.name,
                 domainid=self.account.domainid,
             )
     
    -        self.assertNotEqual(
    -            ISO,
    -            None,
    -            "Check if volume gets created"
    +        with self.assertRaises(Exception):
    +            VirtualMachine.create(self.userapiclient,
    +                                  self.testdata["small"],
    +                                  templateid=self.template.id,
    +                                  accountid=self.account.name,
    +                                  domainid=self.account.domainid,
    +                                  serviceofferingid=self.service_offering.id,
    +                                  zoneid=self.zone.id,
    +                                  )
    +
    +        root_volume = list_volumes(
    +            self.userapiclient,
    +            virtualmachineid=vm_user.id,
    +            type='ROOT',
    +            listall=True
             )
    -        user_vm_new.delete(self.apiclient)
     
    -        # Step 3
    -        # Deletion of zone should fail if vm,volume is present on the zone
             with self.assertRaises(Exception):
    -            self.zone.delete(self.apiclient)
    +            Snapshot.create(
    +                self.userapiclient,
    +                root_volume[0].id)
    +
    +        with self.assertRaises(Exception):
    +            Template.register(
    +                self.userapiclient,
    +                self.testdata["privatetemplate"],
    +                zoneid=self.zone.id)
    +
    +        with self.assertRaises(Exception):
    +            Volume.create(
    +                self.userapiclient,
    +                self.testdata["volume"],
    +                zoneid=self.zone.id,
    +                account=self.account.name,
    +                domainid=self.account.domainid,
    +                diskofferingid=self.disk_offering.id
    +            )
    +
    +        with self.assertRaises(Exception):
    +            Iso.create(
    +                self.userapiclient,
    +                self.testdata["iso2"],
    +                zoneid=self.zone.id,
    +                account=self.account.name,
    +                domainid=self.account.domainid,
    +            )
    +
    +        # Step 2
    +        cmd.allocationstate = ENABLED
    +        self.apiclient.updatePod(cmd)
    +        podList = Pod.list(self.apiclient, id=self.pod.id)
    +
    +        self.assertEqual(podList[0].allocationstate,
    +                         ENABLED,
    +                         "Check if the pod is in enabled state"
    +                         )
    +
    +        root_vm_new = VirtualMachine.create(
    +            self.apiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id,
    +        )
    +        self.assertNotEqual(root_vm_new.state,
    +                            RUNNING,
    +                            "Verify that admin should be able \
    +                                    to create new VM")
    +
    +        Snapshot.create(
    +            self.apiclient,
    +            root_volume[0].id)
    +
    +        snapshots = list_snapshots(
    +            self.apiclient,
    +            volumeid=root_volume[0].id,
    +            listall=True)
    +
    +        Template.create_from_snapshot(
    +            self.apiclient,
    +            snapshots[0],
    +            self.testdata["privatetemplate"])
    +
    +        Template.register(
    +            self.apiclient,
    +            self.testdata["privatetemplate"],
    +            zoneid=self.zone.id)
    +
    +        Volume.create(
    +            self.apiclient,
    +            self.testdata["volume"],
    +            zoneid=self.zone.id,
    +            account=self.account.name,
    +            domainid=self.account.domainid,
    +            diskofferingid=self.disk_offering.id
    +        )
    +
    +        Iso.create(
    +            self.apiclient,
    +            self.testdata["iso2"],
    +            zoneid=self.zone.id,
    +            account=self.account.name,
    +            domainid=self.account.domainid,
    +        )
    +
    +        # Non root user
    +        user_vm_new = VirtualMachine.create(
    +            self.userapiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id,
    +        )
    +        self.assertNotEqual(user_vm_new.state,
    +                            RUNNING,
    +                            "Verify that admin should create new VM")
    +
    +        Snapshot.create(
    +            self.userapiclient,
    +            root_volume[0].id)
    +
    +        snapshots = list_snapshots(
    +            self.userapiclient,
    +            volumeid=root_volume[0].id,
    +            listall=True)
    +
    +        Template.register(
    +            self.userapiclient,
    +            self.testdata["privatetemplate"],
    +            zoneid=self.zone.id)
    +
    +        Volume.create(
    +            self.userapiclient,
    +            self.testdata["volume"],
    +            zoneid=self.zone.id,
    +            account=self.account.name,
    +            domainid=self.account.domainid,
    +            diskofferingid=self.disk_offering.id
    +        )
    +
    +        Iso.create(
    +            self.userapiclient,
    +            self.testdata["iso2"],
    +            zoneid=self.zone.id,
    +            account=self.account.name,
    +            domainid=self.account.domainid,
    +        )
    +
    +        user_vm_new.delete(self.apiclient)
    +        # Step 3
    +        # Deletion of pod should fail if resources are running on the pod
    +        with self.assertRaises(Exception):
    +            self.pod.delete(self.apiclient)
    +
    +        return
    +
    +
    +class TestDisableEnableCluster(cloudstackTestCase):
    +
    +    @classmethod
    +    def setUpClass(cls):
    +        testClient = super(TestDisableEnableCluster, cls).getClsTestClient()
    +        cls.apiclient = testClient.getApiClient()
    +        cls.testdata = testClient.getParsedTestDataConfig()
    +        cls.hypervisor = cls.testClient.getHypervisorInfo()
    +
    +        # Get Zone, Domain and templates
    +        cls.domain = get_domain(cls.apiclient)
    +        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    +        cls.pod = get_pod(
    +            cls.apiclient,
    +            zone_id=cls.zone.id)
    +
    +        hostList = Host.list(cls.apiclient, zoneid=cls.zone.id, type="routing")
    +        clusterList = Cluster.list(cls.apiclient, id=hostList[0].clusterid)
    +        cls.cluster = Cluster(clusterList[0].__dict__)
    +
    +        cls.template = get_template(
    +            cls.apiclient,
    +            cls.zone.id,
    +            cls.testdata["ostype"])
    +
    +        cls._cleanup = []
    +
    +        try:
    +            cls.service_offering = ServiceOffering.create(
    +                cls.apiclient,
    +                cls.testdata["service_offering"],
    +            )
    +            cls._cleanup.append(cls.service_offering)
    +
    +            cls.disk_offering = DiskOffering.create(
    +                cls.apiclient,
    +                cls.testdata["disk_offering"],
    +            )
    +            cls._cleanup.append(cls.disk_offering)
    +
    +            # Create an account
    +            cls.account = Account.create(
    +                cls.apiclient,
    +                cls.testdata["account"],
    +                domainid=cls.domain.id
    +            )
    +            cls._cleanup.append(cls.account)
    +
    +            # Create user api client of the account
    +            cls.userapiclient = testClient.getUserApiClient(
    +                UserName=cls.account.name,
    +                DomainName=cls.account.domain
    +            )
    +            cls.vm_list = []
    +
    +        except Exception as e:
    +            cls.tearDownClass()
    +            raise e
    +        return
    +
    +    @classmethod
    +    def tearDownClass(cls):
    +        try:
    +            clusterList = Cluster.list(cls.apiclient, id=cls.cluster.id)
    +            if clusterList[0].allocationstate == DISABLED:
    +                cmd = updateCluster.updateClusterCmd()
    +                cmd.id = clusterList[0].id
    +                cmd.allocationstate = ENABLED
    +                cls.apiclient.updateCluster(cmd)
    +
    +            if clusterList[0].managedstate == "Unmanaged":
    +                cmd = updateCluster.updateClusterCmd()
    +                cmd.id = clusterList[0].id
    +                cmd.managedstate = "Managed"
    +                cls.apiclient.updateCluster(cmd)
    +
    +            cleanup_resources(cls.apiclient, cls._cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +
    +    def setUp(self):
    +        self.apiclient = self.testClient.getApiClient()
    +        self.dbclient = self.testClient.getDbConnection()
    +        self.cleanup = []
    +
    +    def tearDown(self):
    +        try:
    +            cleanup_resources(self.apiclient, self.cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +        return
    +
    +    @attr(tags=["advanced", "basic"], required_hardware="true")
    +    def test_01_disable_enable_cluster(self):
    +        """disable enable cluster
    +            1. Disable cluster and verify following things:
    +                For admin user:
    +                     --Should be able to create new vm, snapshot,
    +                     volume,template,iso in the same cluster
    +                For Non-admin user:
    +                     --Should not be able to create new vm, snapshot,
    +                     volume,template,iso in the same cluster
    +            2. Enable the above disabled cluster and verify that:
    +                -All users should be able to deploy new vm, snapshot,
    +                volume,template,iso in the same cluster
    +            3. Disable the managestate of the cluster and verify that:
    +                --Host in the cluster should get disconnected
    +                --VM's in the cluster are ping-able and reachable via ssh
    +                --Creation of new VM in the cluster should fail
    +            4. Enable the managestate of the cluster and verify that:
    +                --Hosts in the cluster get connected
    +                --VM's in the cluster are accessible
    +            5. Try to delete the cluster and it should fail with error message:
    +                -"The cluster is not deletable because there are
    +                servers running in this cluster"
    +
    +        """
    +        # Step 1
    +        vm_user = VirtualMachine.create(
    +            self.userapiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id,
    +        )
    +
    +        self.vm_list.append(vm_user)
    +
    +        vm_root = VirtualMachine.create(
    +            self.apiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering.id,
    +            zoneid=self.zone.id,
    +        )
    +
    +        self.vm_list.append(vm_root)
    +
    +        cmd = updateCluster.updateClusterCmd()
    +        cmd.id = self.cluster.id
    +        cmd.allocationstate = DISABLED
    +        self.apiclient.updateCluster(cmd)
    +        clusterList = Cluster.list(self.apiclient, id=self.cluster.id)
    +
    +        self.assertEqual(clusterList[0].allocationstate,
    +                         DISABLED,
    +                         "Check if the cluster is in disabled state"
    +                         )
    +        # Verify the existing vms should be running
    +        self.assertEqual(vm_user.state,
    +                         RUNNING,
    --- End diff --
    
    Use lowercase


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

Mime
View raw message