cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From r...@apache.org
Subject git commit: updated refs/heads/master to 7332e88
Date Thu, 11 Feb 2016 19:42:13 GMT
Repository: cloudstack
Updated Branches:
  refs/heads/master cc8508d80 -> 7332e88ec


Revert "Merge pull request #667 from pritisarap12/CLOUDSTACK-8717-Failed-to-start-instance-after-restoring-the-running-instance"

This reverts commit cc8508d808b5c68bb3133e8d6c966c0df1bb9532, reversing
changes made to d3dc053b7d0da74b62121379dc7d4c9cf95dbf4e.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/7332e88e
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/7332e88e
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/7332e88e

Branch: refs/heads/master
Commit: 7332e88ec935f7bdcec4e0659dfac88d537d898a
Parents: cc8508d
Author: Remi Bergsma <github@remi.nl>
Authored: Thu Feb 11 20:31:00 2016 +0100
Committer: Remi Bergsma <github@remi.nl>
Committed: Thu Feb 11 20:31:00 2016 +0100

----------------------------------------------------------------------
 .../testpaths/testpath_restore_vm.py            | 211 -------------------
 1 file changed, 211 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7332e88e/test/integration/testpaths/testpath_restore_vm.py
----------------------------------------------------------------------
diff --git a/test/integration/testpaths/testpath_restore_vm.py b/test/integration/testpaths/testpath_restore_vm.py
deleted file mode 100644
index d73499a..0000000
--- a/test/integration/testpaths/testpath_restore_vm.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-Test restore running VM on VMWare with one cluster having 2 Primary Storage
-"""
-
-
-from nose.plugins.attrib import attr
-from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.lib.utils import cleanup_resources, validateList
-from marvin.lib.base import (Account,
-                             ServiceOffering,
-                             VirtualMachine,
-                             StoragePool
-                             )
-from marvin.lib.common import (get_domain,
-                               get_zone,
-                               get_template,
-                               list_volumes,
-                               list_virtual_machines
-                               )
-
-from marvin.codes import CLUSTERTAG1, ROOT, PASS
-import time
-
-
-class TestRestoreVM(cloudstackTestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        testClient = super(TestRestoreVM, cls).getClsTestClient()
-        cls.apiclient = testClient.getApiClient()
-        cls.testdata = testClient.getParsedTestDataConfig()
-        cls.hypervisor = cls.testClient.getHypervisorInfo()
-
-        # Get Zone, Domain and templates
-        cls.domain = get_domain(cls.apiclient)
-        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
-
-        cls.template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.testdata["ostype"])
-
-        cls._cleanup = []
-
-        try:
-            cls.skiptest = False
-            if cls.hypervisor.lower() not in ["vmware"]:
-                cls.skiptest = True
-                return
-
-            # Create an account
-            cls.account = Account.create(
-                cls.apiclient,
-                cls.testdata["account"],
-                domainid=cls.domain.id
-            )
-            cls._cleanup.append(cls.account)
-            # Create user api client of the account
-            cls.userapiclient = testClient.getUserApiClient(
-                UserName=cls.account.name,
-                DomainName=cls.account.domain
-            )
-            # Create Service offering
-            cls.service_offering_cwps = ServiceOffering.create(
-                cls.apiclient,
-                cls.testdata["service_offering"],
-                tags=CLUSTERTAG1
-            )
-            cls._cleanup.append(cls.service_offering_cwps)
-        except Exception as e:
-            cls.tearDownClass()
-            raise e
-        return
-
-    @classmethod
-    def tearDownClass(cls):
-        try:
-            cleanup_resources(cls.apiclient, cls._cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-
-    def setUp(self):
-
-        self.cleanup = []
-        if self.skiptest:
-            self.skipTest("This test is to be checked on VMWare only \
-                    Hence, skip for %s" % self.hypervisor)
-
-        self.apiclient = self.testClient.getApiClient()
-        self.dbclient = self.testClient.getDbConnection()
-
-    def tearDown(self):
-        try:
-            if self.pools:
-                StoragePool.update(
-                    self.apiclient,
-                    id=self.pools[0].id,
-                    tags="")
-
-            cleanup_resources(self.apiclient, self.cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        return
-
-    @attr(tags=["advanced", "basic"], required_hardware="true")
-    def test_01_recover_VM(self):
-        """ Test Restore VM on VMWare
-            1. Deploy a VM without datadisk
-            2. Restore the VM
-            3. Verify that VM comes up in Running state
-        """
-        try:
-            self.pools = StoragePool.list(
-                self.apiclient,
-                zoneid=self.zone.id,
-                scope="CLUSTER")
-
-            status = validateList(self.pools)
-
-            # Step 3
-            self.assertEqual(
-                status[0],
-                PASS,
-                "Check: Failed to list  cluster wide storage pools")
-
-            if len(self.pools) < 2:
-                self.skipTest("There must be at atleast two cluster wide\
-                storage pools available in the setup")
-
-        except Exception as e:
-            self.skipTest(e)
-
-        # Adding tags to Storage Pools
-        cluster_no = 1
-        StoragePool.update(
-            self.apiclient,
-            id=self.pools[0].id,
-            tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
-
-        self.vm = VirtualMachine.create(
-            self.apiclient,
-            self.testdata["small"],
-            accountid=self.account.name,
-            templateid=self.template.id,
-            domainid=self.account.domainid,
-            serviceofferingid=self.service_offering_cwps.id,
-            zoneid=self.zone.id,
-        )
-        # Step 2
-
-        volumes_root_list = list_volumes(
-            self.apiclient,
-            virtualmachineid=self.vm.id,
-            type=ROOT,
-            listall=True
-        )
-
-        root_volume = volumes_root_list[0]
-
-        # Restore VM until its ROOT disk is recreated on another Primary Storage
-        while True:
-            self.vm.restore(self.apiclient)
-            volumes_root_list = list_volumes(
-                self.apiclient,
-                virtualmachineid=self.vm.id,
-                type=ROOT,
-                listall=True
-            )
-
-            root_volume = volumes_root_list[0]
-
-            if root_volume.storage != self.pools[0].name:
-                break
-
-        # Step 3
-        vm_list = list_virtual_machines(
-            self.apiclient,
-            id=self.vm.id)
-
-        state = vm_list[0].state
-        i = 0
-        while(state != "Running"):
-            vm_list = list_virtual_machines(
-                self.apiclient,
-                id=self.vm.id)
-
-            time.sleep(10)
-            i = i + 1
-            state = vm_list[0].state
-            if i >= 10:
-                self.fail("Restore VM Failed")
-                break
-
-        return


Mime
View raw message