Return-Path: X-Original-To: apmail-cloudstack-dev-archive@www.apache.org Delivered-To: apmail-cloudstack-dev-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 444F21862E for ; Wed, 19 Aug 2015 04:40:08 +0000 (UTC) Received: (qmail 29918 invoked by uid 500); 19 Aug 2015 04:40:07 -0000 Delivered-To: apmail-cloudstack-dev-archive@cloudstack.apache.org Received: (qmail 29851 invoked by uid 500); 19 Aug 2015 04:40:07 -0000 Mailing-List: contact dev-help@cloudstack.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@cloudstack.apache.org Delivered-To: mailing list dev@cloudstack.apache.org Received: (qmail 29839 invoked by uid 99); 19 Aug 2015 04:40:07 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 19 Aug 2015 04:40:07 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id CC08AE0941; Wed, 19 Aug 2015 04:40:06 +0000 (UTC) From: nitt10prashant To: dev@cloudstack.apache.org Reply-To: dev@cloudstack.apache.org References: In-Reply-To: Subject: [GitHub] cloudstack pull request: CLOUDSTACK-8745 : verify usage after root... Content-Type: text/plain Message-Id: <20150819044006.CC08AE0941@git1-us-west.apache.org> Date: Wed, 19 Aug 2015 04:40:06 +0000 (UTC) Github user nitt10prashant commented on a diff in the pull request: https://github.com/apache/cloudstack/pull/713#discussion_r37379426 --- Diff: test/integration/component/maint/test_ha_pool_maintenance.py --- @@ -0,0 +1,229 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (enableStorageMaintenance, + cancelStorageMaintenance + ) +from marvin.lib.utils import (cleanup_resources, + validateList) +from marvin.lib.base import (Account, + VirtualMachine, + ServiceOffering, + Cluster, + StoragePool, + Volume) +from marvin.lib.common import (get_zone, + get_domain, + get_template, + list_hosts + ) +from marvin.codes import PASS + + +def maintenance(self, storageid): + """enables maintenance mode of a Storage pool""" + + cmd = enableStorageMaintenance.enableStorageMaintenanceCmd() + cmd.id = storageid + return self.api_client.enableStorageMaintenance(cmd) + + +def cancelmaintenance(self, storageid): + """cancel maintenance mode of a Storage pool""" + + cmd = cancelStorageMaintenance.cancelStorageMaintenanceCmd() + cmd.id = storageid + return self.api_client.cancelStorageMaintenance(cmd) + + +class testHaPoolMaintenance(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + try: + cls._cleanup = [] + cls.testClient = super( + testHaPoolMaintenance, + cls).getClsTestClient() + cls.api_client = cls.testClient.getApiClient() + cls.services = cls.testClient.getParsedTestDataConfig() + # Get Domain, Zone, Template + cls.domain = get_domain(cls.api_client) + cls.zone = get_zone( + cls.api_client, + cls.testClient.getZoneForTests()) + 
cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + cls.hypervisor = cls.testClient.getHypervisorInfo() + cls.services['mode'] = cls.zone.networktype + cls.hypervisor = cls.testClient.getHypervisorInfo() + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = cls.template.id + cls.clusterWithSufficientPool = None + clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id) + + if not validateList(clusters)[0]: + + cls.debug( + "check list cluster response for zone id %s" % + cls.zone.id) + + for cluster in clusters: + cls.pool = StoragePool.list(cls.api_client, + clusterid=cluster.id, + keyword="NetworkFilesystem" + ) + + if not validateList(cls.pool)[0]: + + cls.debug( + "check list cluster response for zone id %s" % + cls.zone.id) + + if len(cls.pool) >= 2: + cls.clusterWithSufficientPool = cluster + break + if not cls.clusterWithSufficientPool: + return + + cls.services["service_offerings"][ + "tiny"]["offerha"] = "True" + + cls.services_off = ServiceOffering.create( + cls.api_client, + cls.services["service_offerings"]["tiny"]) + cls._cleanup.append(cls.services_off) + + except Exception as e: + cls.tearDownClass() + raise Exception("Warning: Exception in setup : %s" % e) + return + + def setUp(self): + + self.apiClient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + if not self.clusterWithSufficientPool: + self.skipTest( + "sufficient storage not available in any cluster for zone %s" % + self.zone.id) + self.account = Account.create( + self.api_client, + self.services["account"], + domainid=self.domain.id + ) + self.cleanup.append(self.account) + + def tearDown(self): + # Clean up, terminate the created resources + cancelmaintenance(self, storageid=self.storageid[0][0]) + cleanup_resources(self.apiClient, self.cleanup) + return + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.api_client, 
cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + return + + @attr(tags=["advanced", "cl", "advancedns", "sg", + "basic", "eip", "simulator", "multihost"]) + def test_ha_with_storage_maintenance(self): + """put storage in maintenance mode and start ha vm and check usage""" + # Steps + # 1. Create a Compute service offering with the 'Offer HA' option + # selected. + # 2. Create a Guest VM with the compute service offering created above. + # 3. put PS into maintenance mode + # 4. vm should go in stop state + # 5. start vm ,vm should come up on another storage + # 6. check usage events are getting generated for root disk + + host = list_hosts( + self.api_client, + clusterid=self.clusterWithSufficientPool.id) + self.assertEqual(validateList(host)[0], + PASS, + "check list host response for cluster id %s" + % self.clusterWithSufficientPool.id) + + self.virtual_machine_with_ha = VirtualMachine.create( + self.api_client, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.services_off.id, + hostid=host[0].id + ) + --- End diff -- The issue was only with the root volume; usage was getting generated for the data disk. --- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. ---