cloudstack-issues mailing list archives

From "ASF GitHub Bot (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (CLOUDSTACK-8745) After a volume is migrated; the usage table still shows the old volume id
Date Wed, 19 Aug 2015 05:43:46 GMT

    [ https://issues.apache.org/jira/browse/CLOUDSTACK-8745?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14702527#comment-14702527 ]

ASF GitHub Bot commented on CLOUDSTACK-8745:
--------------------------------------------

Github user ksowmya commented on a diff in the pull request:

    https://github.com/apache/cloudstack/pull/713#discussion_r37381494
  
    --- Diff: test/integration/component/maint/test_ha_pool_maintenance.py ---
    @@ -0,0 +1,229 @@
    +#!/usr/bin/env python
    +# Licensed to the Apache Software Foundation (ASF) under one
    +# or more contributor license agreements.  See the NOTICE file
    +# distributed with this work for additional information
    +# regarding copyright ownership.  The ASF licenses this file
    +# to you under the Apache License, Version 2.0 (the
    +# "License"); you may not use this file except in compliance
    +# with the License.  You may obtain a copy of the License at
    +#
    +#   http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing,
    +# software distributed under the License is distributed on an
    +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    +# KIND, either express or implied.  See the License for the
    +# specific language governing permissions and limitations
    +# under the License.
    +
    +from nose.plugins.attrib import attr
    +from marvin.cloudstackTestCase import cloudstackTestCase
    +from marvin.cloudstackAPI import (enableStorageMaintenance,
    +                                  cancelStorageMaintenance
    +                                  )
    +from marvin.lib.utils import (cleanup_resources,
    +                              validateList)
    +from marvin.lib.base import (Account,
    +                             VirtualMachine,
    +                             ServiceOffering,
    +                             Cluster,
    +                             StoragePool,
    +                             Volume)
    +from marvin.lib.common import (get_zone,
    +                               get_domain,
    +                               get_template,
    +                               list_hosts
    +                               )
    +from marvin.codes import PASS
    +
    +
    +def maintenance(self, storageid):
    +    """enables maintenance mode of a Storage pool"""
    +
    +    cmd = enableStorageMaintenance.enableStorageMaintenanceCmd()
    +    cmd.id = storageid
    +    return self.api_client.enableStorageMaintenance(cmd)
    +
    +
    +def cancelmaintenance(self, storageid):
    +    """cancel maintenance mode of a Storage pool"""
    +
    +    cmd = cancelStorageMaintenance.cancelStorageMaintenanceCmd()
    +    cmd.id = storageid
    +    return self.api_client.cancelStorageMaintenance(cmd)
    +
    +
    +class testHaPoolMaintenance(cloudstackTestCase):
    +
    +    @classmethod
    +    def setUpClass(cls):
    +        try:
    +            cls._cleanup = []
    +            cls.testClient = super(
    +                testHaPoolMaintenance,
    +                cls).getClsTestClient()
    +            cls.api_client = cls.testClient.getApiClient()
    +            cls.services = cls.testClient.getParsedTestDataConfig()
    +            # Get Domain, Zone, Template
    +            cls.domain = get_domain(cls.api_client)
    +            cls.zone = get_zone(
    +                cls.api_client,
    +                cls.testClient.getZoneForTests())
    +            cls.template = get_template(
    +                cls.api_client,
    +                cls.zone.id,
    +                cls.services["ostype"]
    +            )
    +            cls.hypervisor = cls.testClient.getHypervisorInfo()
    +            cls.services['mode'] = cls.zone.networktype
    +            cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    +            cls.services["virtual_machine"]["template"] = cls.template.id
    +            cls.clusterWithSufficientPool = None
    +            clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
    +
    +            if not validateList(clusters)[0]:
    +                cls.debug(
    +                    "check list cluster response for zone id %s" %
    +                    cls.zone.id)
    +                return
    +
    +            for cluster in clusters:
    +                cls.pool = StoragePool.list(cls.api_client,
    +                                            clusterid=cluster.id,
    +                                            keyword="NetworkFilesystem"
    +                                            )
    +
    +                if not validateList(cls.pool)[0]:
    +                    cls.debug(
    +                        "check list storage pool response for cluster id %s" %
    +                        cluster.id)
    +                    continue
    +
    +                if len(cls.pool) >= 2:
    +                    cls.clusterWithSufficientPool = cluster
    +                    break
    +            if not cls.clusterWithSufficientPool:
    +                return
    +
    +            cls.services["service_offerings"][
    +                "tiny"]["offerha"] = "True"
    +
    +            cls.services_off = ServiceOffering.create(
    +                                  cls.api_client,
    +                                  cls.services["service_offerings"]["tiny"])
    +            cls._cleanup.append(cls.services_off)
    +
    +        except Exception as e:
    +            cls.tearDownClass()
    +            raise Exception("Warning: Exception in setup: %s" % e)
    +        return
    +
    +    def setUp(self):
    +
    +        self.apiClient = self.testClient.getApiClient()
    +        self.dbclient = self.testClient.getDbConnection()
    +        self.cleanup = []
    +        if not self.clusterWithSufficientPool:
    +            self.skipTest(
    +                "sufficient storage not available in any cluster for zone %s" %
    +                self.zone.id)
    +        self.account = Account.create(
    +            self.api_client,
    +            self.services["account"],
    +            domainid=self.domain.id
    +        )
    +        self.cleanup.append(self.account)
    +
    +    def tearDown(self):
    +        # Clean up, terminate the created resources
    +        cancelmaintenance(self, storageid=self.storageid[0][0])
    +        cleanup_resources(self.apiClient, self.cleanup)
    +        return
    +
    +    @classmethod
    +    def tearDownClass(cls):
    +        try:
    +            cleanup_resources(cls.api_client, cls._cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup: %s" % e)
    +
    +        return
    +
    +    @attr(tags=["advanced", "cl", "advancedns", "sg",
    +                "basic", "eip", "simulator", "multihost"])
    +    def test_ha_with_storage_maintenance(self):
    +        """put storage in maintenance mode and start ha vm and check usage"""
    +        # Steps
    +        # 1. Create a Compute service offering with the 'Offer HA' option
    +        # selected.
    +        # 2. Create a Guest VM with the compute service offering created above.
    +        # 3. put PS into maintenance  mode
    +        # 4. vm should go in stop state
    +        # 5. start vm ,vm should come up on another storage
    +        # 6. check usage events are getting generated for root disk
    +
    +        host = list_hosts(
    +            self.api_client,
    +            clusterid=self.clusterWithSufficientPool.id)
    +        self.assertEqual(validateList(host)[0],
    +                         PASS,
    +                         "check list host response for cluster id %s"
    +                         % self.clusterWithSufficientPool.id)
    +
    +        self.virtual_machine_with_ha = VirtualMachine.create(
    +            self.api_client,
    +            self.services["virtual_machine"],
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.services_off.id,
    +            hostid=host[0].id
    +        )
    +
    --- End diff --
    
    Makes sense. Could you please open a task/bug for that so that it can be tracked and worked on separately?
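
For context, steps 3-6 of the test's docstring fall outside the quoted portion of the diff. A minimal sketch of how they might look, assuming the maintenance()/cancelmaintenance() helpers defined at the top of the file and marvin's Volume/VirtualMachine APIs (the variable names are illustrative, not taken from the PR):

    # Find the pool holding the HA VM's root volume and put it into
    # maintenance; the HA framework is expected to stop the VM.
    root_volume = Volume.list(self.api_client,
                              virtualmachineid=self.virtual_machine_with_ha.id,
                              type='ROOT',
                              listall=True)[0]
    # tearDown() indexes self.storageid[0][0], so wrap the id accordingly.
    self.storageid = [[root_volume.storageid]]
    maintenance(self, storageid=root_volume.storageid)

    # Start the VM again; with HA enabled it should come up with its root
    # disk re-created on another pool in the same cluster.
    self.virtual_machine_with_ha.start(self.api_client)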


> After a volume is migrated; the usage table still shows the old volume id
> -------------------------------------------------------------------------
>
>                 Key: CLOUDSTACK-8745
>                 URL: https://issues.apache.org/jira/browse/CLOUDSTACK-8745
>             Project: CloudStack
>          Issue Type: Bug
>      Security Level: Public(Anyone can view this level - this is the default.) 
>          Components: marvin, Usage
>    Affects Versions: 4.5.0
>            Reporter: prashant kumar mishra
>            Assignee: prashant kumar mishra
>
> After a volume is migrated; the usage table still shows the old volume id
> Steps to verify:
> ==========
> 1. Create an HA VM with both a root and a data disk.
> 2. Add one more primary storage to this cluster.
> 3. Put the original/old primary storage, which holds the root and data disks of the VM created in step 1, into maintenance.
> 4. Start the VM that was stopped as part of step 3.
> 5. Check for VOLUME.DELETE & VOLUME.CREATE events for the root disk and data disk in the usage_event table.
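
For step 5, one way to inspect the events from a marvin test is through the DB connection the test already holds (a sketch, assuming the standard cloud.usage_event schema; the filtering and ordering are illustrative):

    # List VOLUME.CREATE / VOLUME.DELETE rows; after the migration there
    # should be a VOLUME.DELETE for the old volume id and a VOLUME.CREATE
    # for the new one, rather than events still pointing at the old id.
    events = self.dbclient.execute(
        "select type, resource_id, resource_name from usage_event "
        "where type in ('VOLUME.CREATE', 'VOLUME.DELETE') "
        "order by id")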



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
