cloudstack-issues mailing list archives

From "ASF GitHub Bot (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (CLOUDSTACK-8717) Failed to start instance after restoring the running instance
Date Mon, 10 Aug 2015 05:02:45 GMT

    [ https://issues.apache.org/jira/browse/CLOUDSTACK-8717?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14679565#comment-14679565 ]

ASF GitHub Bot commented on CLOUDSTACK-8717:
--------------------------------------------

Github user sanju1010 commented on a diff in the pull request:

    https://github.com/apache/cloudstack/pull/667#discussion_r36602070
  
    --- Diff: test/integration/testpaths/testpath_restore_vm.py ---
    @@ -0,0 +1,192 @@
    +# Licensed to the Apache Software Foundation (ASF) under one
    +# or more contributor license agreements.  See the NOTICE file
    +# distributed with this work for additional information
    +# regarding copyright ownership.  The ASF licenses this file
    +# to you under the Apache License, Version 2.0 (the
    +# "License"); you may not use this file except in compliance
    +# with the License.  You may obtain a copy of the License at
    +#
    +#   http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing,
    +# software distributed under the License is distributed on an
    +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    +# KIND, either express or implied.  See the License for the
    +# specific language governing permissions and limitations
    +# under the License.
    +
    +"""
    +Test restoring a running VM on VMware with one cluster having 2 primary storages
    +"""
    +
    +
    +from nose.plugins.attrib import attr
    +from marvin.cloudstackTestCase import cloudstackTestCase
    +from marvin.lib.utils import cleanup_resources
    +from marvin.lib.base import (Account,
    +                             ServiceOffering,
    +                             VirtualMachine,
    +                             StoragePool
    +                             )
    +from marvin.lib.common import (get_domain,
    +                               get_zone,
    +                               get_template,
    +                               list_volumes
    +                               )
    +
    +from marvin.codes import CLUSTERTAG1, ROOT
    +import time
    +
    +
    +class TestRestoreVM(cloudstackTestCase):
    +
    +    @classmethod
    +    def setUpClass(cls):
    +        testClient = super(TestRestoreVM, cls).getClsTestClient()
    +        cls.apiclient = testClient.getApiClient()
    +        cls.testdata = testClient.getParsedTestDataConfig()
    +        cls.hypervisor = cls.testClient.getHypervisorInfo()
    +
    +        # Get Zone, Domain and templates
    +        cls.domain = get_domain(cls.apiclient)
    +        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    +
    +        cls.template = get_template(
    +            cls.apiclient,
    +            cls.zone.id,
    +            cls.testdata["ostype"])
    +
    +        cls._cleanup = []
    +
    +        try:
    +            cls.skiptest = False
    +            if cls.hypervisor.lower() not in ["vmware"]:
    +                cls.skiptest = True
    +                return
    +
    +            # Create an account
    +            cls.account = Account.create(
    +                cls.apiclient,
    +                cls.testdata["account"],
    +                domainid=cls.domain.id
    +            )
    +            cls._cleanup.append(cls.account)
    +            # Create user api client of the account
    +            cls.userapiclient = testClient.getUserApiClient(
    +                UserName=cls.account.name,
    +                DomainName=cls.account.domain
    +            )
    +            # Create Service offering
    +            cls.service_offering_cwps = ServiceOffering.create(
    +                cls.apiclient,
    +                cls.testdata["service_offering"],
    +                tags=CLUSTERTAG1
    +            )
    +            cls._cleanup.append(cls.service_offering_cwps)
    +        except Exception as e:
    +            cls.tearDownClass()
    +            raise e
    +        return
    +
    +    @classmethod
    +    def tearDownClass(cls):
    +        try:
    +            cleanup_resources(cls.apiclient, cls._cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +
    +    def setUp(self):
    +
    +        self.cleanup = []
    +        if self.skiptest:
    +            self.skipTest(
    +                "This test is to be checked on VMware only. "
    +                "Hence, skipping for %s" % self.hypervisor)
    +
    +        self.apiclient = self.testClient.getApiClient()
    +        self.dbclient = self.testClient.getDbConnection()
    +
    +    def tearDown(self):
    +        try:
    +            cleanup_resources(self.apiclient, self.cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +        return
    +
    +    @attr(tags=["advanced", "basic"], required_hardware="false")
    +    def test_01_recover_VM(self):
    +        """ Test Restore VM on VMware
    +            1. Deploy a VM without datadisk
    +            2. Restore the VM
    +            3. Verify that VM comes up in Running state
    +        """
    +        try:
    +            self.pools = StoragePool.list(
    +                self.apiclient,
    +                zoneid=self.zone.id,
    +                scope="CLUSTER")
    +
    +            if len(self.pools) < 2:
    +                self.skipTest(
    +                    "There must be at least two cluster-wide "
    +                    "storage pools available in the setup")
    +
    +        except Exception as e:
    +            self.skipTest(e)
    +
    +        # Adding tags to Storage Pools
    +        cluster_no = 1
    +        self.debug("Storage Pools: %s" % self.pools)
    +        for storagePool in self.pools:
    +            if storagePool.scope == "CLUSTER":
    +                StoragePool.update(
    +                    self.apiclient,
    +                    id=storagePool.id,
    +                    tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
    +                cluster_no += 1
    +
    +        self.vm = VirtualMachine.create(
    +            self.apiclient,
    +            self.testdata["small"],
    +            accountid=self.account.name,
    +            templateid=self.template.id,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering_cwps.id,
    +            zoneid=self.zone.id,
    +        )
    +        # Step 2
    +
    +        volumes_root_list = list_volumes(
    +            self.apiclient,
    +            virtualmachineid=self.vm.id,
    +            type=ROOT,
    +            listall=True
    +        )
    +
    +        root_volume = volumes_root_list[0]
    +
    +        # Restore the VM until its ROOT disk is recreated on another primary storage
    +        while True:
    +            self.vm.restore(self.apiclient)
    +            volumes_root_list = list_volumes(
    +                self.apiclient,
    +                virtualmachineid=self.vm.id,
    +                type=ROOT,
    +                listall=True
    +            )
    +
    +            root_volume = volumes_root_list[0]
    +
    +            if root_volume.storage != self.pools[0].name:
    +                break
    +
    +        # Step 3
    +        state = self.vm.state
    +        i = 0
    +        while(state != "Running"):
    +            time.sleep(10)
    +            i = i + 1
    +            state = self.vm.state
    --- End diff --
    
    Without listing the VM, how is vm.state going to be updated?
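
For context, a minimal sketch of the kind of refresh the question points at: re-reading the state through listVirtualMachines instead of the cached self.vm.state attribute. It reuses the VirtualMachine and time imports already present in the test path; the retry bound of 60 x 10s is illustrative and not taken from the patch under review.

    # Sketch only (not part of the patch under review): poll the API for the
    # VM state instead of re-reading the stale self.vm.state attribute.
    state = self.vm.state
    retries = 60
    while state != "Running" and retries > 0:
        time.sleep(10)
        retries -= 1
        vm_list = VirtualMachine.list(self.apiclient, id=self.vm.id)
        if vm_list:
            state = vm_list[0].state
    self.assertEqual(state, "Running", "VM did not reach the Running state")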


> Failed to start instance after restoring the running instance 
> --------------------------------------------------------------
>
>                 Key: CLOUDSTACK-8717
>                 URL: https://issues.apache.org/jira/browse/CLOUDSTACK-8717
>             Project: CloudStack
>          Issue Type: Bug
>      Security Level: Public(Anyone can view this level - this is the default.) 
>          Components: Automation
>    Affects Versions: 4.2.1
>            Reporter: Priti Sarap
>             Fix For: 4.2.1
>
>
> On a setup with two cluster-wide primary storages, verify restoring a running instance.
> (While restoring an instance, the root disk may get created on another primary storage.)
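
For reference, a minimal sketch of the API sequence this scenario exercises, using marvin's generated command classes (restoreVirtualMachine followed by listVolumes); apiclient and vm_id are placeholders for an existing API client and the UUID of the running instance, not names from the issue or the patch.

    # Sketch only: restore the running instance, then check which primary
    # storage its new ROOT disk landed on. apiclient and vm_id are placeholders.
    from marvin.cloudstackAPI import restoreVirtualMachine, listVolumes

    cmd = restoreVirtualMachine.restoreVirtualMachineCmd()
    cmd.virtualmachineid = vm_id
    apiclient.restoreVirtualMachine(cmd)

    vol_cmd = listVolumes.listVolumesCmd()
    vol_cmd.virtualmachineid = vm_id
    vol_cmd.type = "ROOT"
    vol_cmd.listall = True
    root_volume = apiclient.listVolumes(vol_cmd)[0]
    # root_volume.storage names the primary storage that now backs the ROOT
    # disk; after a restore it may differ from the pool the VM started on.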



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
