cloudstack-issues mailing list archives

From "ASF GitHub Bot (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (CLOUDSTACK-10380) changing passwordenabled to true while guest vm is running causes unexpected passwordreset again in startvm
Date Wed, 26 Sep 2018 14:22:02 GMT

    [ https://issues.apache.org/jira/browse/CLOUDSTACK-10380?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16628847#comment-16628847 ] 

ASF GitHub Bot commented on CLOUDSTACK-10380:
---------------------------------------------

PaulAngus closed pull request #2743: CLOUDSTACK-10380: Fix startvm giving another pw after pw reset
URL: https://github.com/apache/cloudstack/pull/2743
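
For readers skimming the archive: the essence of the fix, as shown in the
VirtualRouterElement.savePassword() hunk of the diff below, is to clear the
VM's update_parameters flag once the password has been pushed to a running
virtual router, so that a later startVM does not generate and inject a second
password. The following is a minimal, self-contained sketch of that idea only;
the class and type names (PasswordResetSketch, SketchVm, SketchRouter) are
hypothetical stand-ins and not CloudStack code.

import java.util.List;

public class PasswordResetSketch {
    enum State { Running, Stopped }

    static class SketchVm {
        boolean updateParameters = true; // "push a (new) password on next start"
        String password;
    }

    static class SketchRouter {
        State state = State.Running;
        boolean savePassword(SketchVm vm, String password) {
            vm.password = password;      // password stored for the guest
            return true;
        }
    }

    // Mirrors the patched savePassword(): once a running router has accepted
    // the password, clear the flag so startVM does not reset it again.
    static boolean savePassword(List<SketchRouter> routers, SketchVm vm, String password) {
        for (SketchRouter router : routers) {
            if (router.state == State.Running) {
                boolean result = router.savePassword(vm, password);
                if (result) {
                    vm.updateParameters = false; // the key line of the fix
                }
                return result;
            }
        }
        return false;
    }
}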
 
 
   

This PR was merged from a forked repository. Because GitHub hides the original
diff on merge, it is reproduced below for the sake of provenance:

diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
index b5aca5d9f10..5b27203a7dc 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
@@ -1336,7 +1336,7 @@ public boolean canUpdateInSequence(Network network, boolean forced){
 
         //check if the there are no service provider other than virtualrouter.
         for(Provider provider : providers) {
-            if (provider!=Provider.VirtualRouter)
+            if (provider != Provider.VirtualRouter)
                 throw new UnsupportedOperationException("Cannot update the network resources in sequence when providers other than virtualrouter are used");
         }
         //check if routers are in correct state before proceeding with the update
diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java
index d13234d1016..66f76387032 100644
--- a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java
+++ b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java
@@ -29,11 +29,11 @@
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
+
 import com.cloud.network.Network;
 import com.cloud.network.dao.NetworkDao;
 import com.cloud.network.dao.NetworkVO;
-import org.apache.log4j.Logger;
-
 import com.cloud.server.ResourceTag.ResourceObjectType;
 import com.cloud.tags.dao.ResourceTagDao;
 import com.cloud.user.Account;
@@ -368,9 +368,13 @@ public void saveDetails(UserVmVO vm) {
         if (detailsStr == null) {
             return;
         }
+
+        final Map<String, Boolean> visibilityMap = _detailsDao.listDetailsVisibility(vm.getId());
+
         List<UserVmDetailVO> details = new ArrayList<UserVmDetailVO>();
-        for (String key : detailsStr.keySet()) {
-            details.add(new UserVmDetailVO(vm.getId(), key, detailsStr.get(key), true));
+        for (Map.Entry<String, String> entry : detailsStr.entrySet()) {
+            boolean display = visibilityMap.getOrDefault(entry.getKey(), true);
+            details.add(new UserVmDetailVO(vm.getId(), entry.getKey(), entry.getValue(), display));
         }
 
         _detailsDao.saveDetails(details);
diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java
index 5d2d919a685..3f340455bab 100644
--- a/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java
+++ b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java
@@ -73,6 +73,8 @@
 
     public Map<String, String> listDetailsKeyPairs(long resourceId, boolean forDisplay);
 
+    Map<String, Boolean> listDetailsVisibility(long resourceId);
+
     public void saveDetails(List<R> details);
 
     public void addDetail(long resourceId, String key, String value, boolean display);
diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java
index b3e7ea27587..7110541e3fc 100644
--- a/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java
+++ b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java
@@ -27,7 +27,7 @@
 import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.TransactionLegacy;
 
-public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends GenericDaoBase<R, Long> {
+public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends GenericDaoBase<R, Long> implements ResourceDetailsDao<R> {
     private SearchBuilder<R> AllFieldsSearch;
 
     public ResourceDetailsDaoBase() {
@@ -81,6 +81,18 @@ public R findDetail(long resourceId, String name) {
         return details;
     }
 
+    public Map<String, Boolean> listDetailsVisibility(long resourceId) {
+        SearchCriteria<R> sc = AllFieldsSearch.create();
+        sc.setParameters("resourceId", resourceId);
+
+        List<R> results = search(sc, null);
+        Map<String, Boolean> details = new HashMap<>(results.size());
+        for (R result : results) {
+            details.put(result.getName(), result.isDisplay());
+        }
+        return details;
+    }
+
     public List<R> listDetails(long resourceId) {
         SearchCriteria<R> sc = AllFieldsSearch.create();
         sc.setParameters("resourceId", resourceId);
diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java
index 9b481ed4a64..58d9343f633 100644
--- a/server/src/com/cloud/network/element/VirtualRouterElement.java
+++ b/server/src/com/cloud/network/element/VirtualRouterElement.java
@@ -24,14 +24,12 @@
 
 import javax.inject.Inject;
 
-import com.cloud.utils.net.NetUtils;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.beans.factory.annotation.Qualifier;
-import com.cloud.network.router.NetworkHelper;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 import org.cloud.network.router.deployment.RouterDeploymentDefinition;
 import org.cloud.network.router.deployment.RouterDeploymentDefinitionBuilder;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Qualifier;
 
 import com.google.gson.Gson;
 
@@ -83,6 +81,7 @@
 import com.cloud.network.dao.VirtualRouterProviderDao;
 import com.cloud.network.lb.LoadBalancingRule;
 import com.cloud.network.lb.LoadBalancingRulesManager;
+import com.cloud.network.router.NetworkHelper;
 import com.cloud.network.router.VirtualRouter;
 import com.cloud.network.router.VirtualRouter.Role;
 import com.cloud.network.router.VpcVirtualNetworkApplianceManager;
@@ -103,6 +102,7 @@
 import com.cloud.utils.db.QueryBuilder;
 import com.cloud.utils.db.SearchCriteria.Op;
 import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.net.NetUtils;
 import com.cloud.vm.DomainRouterVO;
 import com.cloud.vm.NicProfile;
 import com.cloud.vm.ReservationContext;
@@ -703,7 +703,14 @@ public boolean savePassword(final Network network, final NicProfile nic, final V
         // save the password in DB
         for (final VirtualRouter router : routers) {
             if (router.getState() == State.Running) {
-                return networkTopology.savePasswordToRouter(network, nic, uservm, router);
+                final boolean result = networkTopology.savePasswordToRouter(network, nic, uservm, router);
+                if (result) {
+                    // Explicit password reset, while VM hasn't generated a password yet.
+                    final UserVmVO userVmVO = _userVmDao.findById(vm.getId());
+                    userVmVO.setUpdateParameters(false);
+                    _userVmDao.update(userVmVO.getId(), userVmVO);
+                }
+                return result;
             }
         }
         final String password = (String) uservm.getParameter(VirtualMachineProfile.Param.VmPassword);
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 8e972155f80..7ba282f4b18 100644
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -39,6 +39,11 @@
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+
 import org.apache.cloudstack.acl.ControlledEntity.ACLType;
 import org.apache.cloudstack.acl.SecurityChecker.AccessType;
 import org.apache.cloudstack.affinity.AffinityGroupService;
@@ -91,10 +96,6 @@
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Logger;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
@@ -688,10 +689,6 @@ public UserVm resetVMPassword(ResetVMPasswordCmd cmd, String password) throws Re
 
         if (result) {
             userVm.setPassword(password);
-            // update the password in vm_details table too
-            // Check if an SSH key pair was selected for the instance and if so
-            // use it to encrypt & save the vm password
-            encryptAndStorePassword(userVm, password);
         } else {
             throw new CloudRuntimeException("Failed to reset password for the virtual machine ");
         }
@@ -736,7 +733,6 @@ private boolean resetVMPasswordInternal(Long vmId, String password) throws Resou
             } else {
                 final UserVmVO userVm = _vmDao.findById(vmId);
                 _vmDao.loadDetails(userVm);
-                userVm.setPassword(password);
                 // update the password in vm_details table too
                 // Check if an SSH key pair was selected for the instance and if so
                 // use it to encrypt & save the vm password
@@ -850,8 +846,9 @@ private boolean resetVMSSHKeyInternal(Long vmId, String sshPublicKey, String pas
                 userVm.setPassword(password);
                 //update the encrypted password in vm_details table too
                 encryptAndStorePassword(userVm, password);
+            } else {
+                _vmDao.saveDetails(userVm);
             }
-            _vmDao.saveDetails(userVm);
 
             if (vmInstance.getState() == State.Stopped) {
                 s_logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of SSH Key reset");
@@ -4461,6 +4458,7 @@ public void finalizeStop(VirtualMachineProfile profile, Answer answer) {
                     password = DBEncryptionUtil.decrypt(vm.getDetail("password"));
                 } else {
                     password = _mgr.generateRandomPassword();
+                    vm.setPassword(password);
                 }
             }
 
@@ -4499,11 +4497,10 @@ public void finalizeStop(VirtualMachineProfile profile, Answer answer) {
             // this value is not being sent to the backend; need only for api
             // display purposes
             if (template.getEnablePassword()) {
-                vm.setPassword((String)vmParamPair.second().get(VirtualMachineProfile.Param.VmPassword));
-                vm.setUpdateParameters(false);
                 if (vm.getDetail("password") != null) {
-                    _vmDetailsDao.remove(_vmDetailsDao.findDetail(vm.getId(), "password").getId());
+                    _vmDetailsDao.removeDetail(vm.getId(), "password");
                 }
+                vm.setUpdateParameters(false);
                 _vmDao.update(vm.getId(), vm);
             }
         }
@@ -6180,7 +6177,7 @@ public UserVm restoreVMInternal(Account caller, UserVmVO vm, Long newTemplateId)
                             vm.setUpdateParameters(false);
                             _vmDao.loadDetails(vm);
                             if (vm.getDetail("password") != null) {
-                                _vmDetailsDao.remove(_vmDetailsDao.findDetail(vm.getId(), "password").getId());
+                                _vmDetailsDao.removeDetail(vm.getId(), "password");
                             }
                             _vmDao.update(vm.getId(), vm);
                         }
diff --git a/test/integration/component/test_configdrive.py b/test/integration/component/test_configdrive.py
index 3d4c7b5a52b..17ac3ae9281 100644
--- a/test/integration/component/test_configdrive.py
+++ b/test/integration/component/test_configdrive.py
@@ -19,11 +19,17 @@
     and password reset functionality with
     ConfigDrive
 """
+import base64
+import os
+import socket
 # Import Local Modules
+import subprocess
+import tempfile
+from contextlib import contextmanager
+
+import time
+from marvin.cloudstackAPI import (restartVPC)
 from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.cloudstackAPI import (resetSSHKeyForVirtualMachine,
-                                  updateTemplate,
-                                  restartVPC)
 from marvin.lib.base import (Account,
                              createVlanIpRange,
                              FireWallRule,
@@ -43,21 +49,17 @@
                              VirtualMachine,
                              VPC,
                              VpcOffering,
-                             Hypervisor)
+                             Hypervisor, Template)
 from marvin.lib.common import (get_domain,
                                get_template,
-                               get_zone,
-                               list_templates)
+                               get_zone, get_test_template)
 from marvin.lib.utils import random_gen
 # Import System Modules
 from nose.plugins.attrib import attr
 from retry import retry
-import tempfile
-import socket
-import base64
-import sys
-import time
-import os
+
+VPC_SERVICES = 'Dhcp,StaticNat,SourceNat,NetworkACL,UserData,Dns'
+ISO_SERVICES = 'Dhcp,SourceNat,StaticNat,UserData,Firewall,Dns'
 
 NO_SUCH_FILE = "No such file or directory"
 
@@ -98,10 +100,23 @@ class Services:
 
     def __init__(self):
         self.services = {
+            "test_templates": {
+                "kvm": {
+                    "name": "Centos-5.5-configdrive",
+                    "displaytext": "ConfigDrive enabled CentOS",
+                    "format": "qcow2",
+                    "hypervisor": "kvm",
+                    "ostype": "CentOS 5.5 (64-bit)",
+                    "url": "http://people.apache.org/~fmaximus/centos55-extended.qcow2.bz2",
+                    "requireshvm": "False",
+                    "ispublic": "True",
+                    "isextractable": "True"
+                }
+            },
             "vpc_offering_configdrive": {
                 "name": 'VPC offering ConfigDrive',
                 "displaytext": 'VPC offering ConfigDrive',
-                "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,UserData,Dns',
+                "supportedservices": VPC_SERVICES,
                 "serviceProviderList": {
                     "Dhcp": "VpcVirtualRouter",
                     "StaticNat": "VpcVirtualRouter",
@@ -115,7 +130,7 @@ def __init__(self):
                 "name": 'vpc_net_off_marvin_configdrive',
                 "displaytext": 'vpc_net_off_marvin_configdrive',
                 "guestiptype": 'Isolated',
-                "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,UserData,Dns',
+                "supportedservices": VPC_SERVICES,
                 "traffictype": 'GUEST',
                 "availability": 'Optional',
                 "useVpc": 'on',
@@ -133,7 +148,7 @@ def __init__(self):
                 "name": 'isolated_configdrive_net_off_marvin',
                 "displaytext": 'isolated_configdrive_net_off_marvin',
                 "guestiptype": 'Isolated',
-                "supportedservices": 'Dhcp,SourceNat,StaticNat,UserData,Firewall,Dns',
+                "supportedservices": ISO_SERVICES,
                 "traffictype": 'GUEST',
                 "availability": 'Optional',
                 "tags": 'native',
@@ -171,7 +186,7 @@ def __init__(self):
                 "network_all_1": {
                     "name": "SharedNetwork-All-1",
                     "displaytext": "SharedNetwork-All-1",
-                    "vlan": "3998",
+                    "vlan": "4001",
                     "gateway": "10.200.100.1",
                     "netmask": "255.255.255.0",
                     "startip": "10.200.100.21",
@@ -181,7 +196,7 @@ def __init__(self):
                 "network_all_2": {
                     "name": "SharedNetwork2-All-2",
                     "displaytext": "SharedNetwork2-All-2",
-                    "vlan": "3999",
+                    "vlan": "4002",
                     "gateway": "10.200.200.1",
                     "netmask": "255.255.255.0",
                     "startip": "10.200.200.21",
@@ -193,6 +208,8 @@ def __init__(self):
 
 
 class ConfigDriveUtils:
+    template = None
+
     class CreateResult:
         def __init__(self, success, offering=None, network=None, vpc=None):
             self.success = success
@@ -201,33 +218,63 @@ def __init__(self, success, offering=None, network=None, vpc=None):
             self.vpc = vpc
 
     class PasswordTest:
-        def __init__(self, password):
-            self.test_presence = False
-            self.presence = None
+        def __init__(self, vm=None, expect_pw=None):
+            """
+            :param vm: vm
+            :param expect_pw: Is a password expected
+            """
+            self.presence = expect_pw
             self.password = None
-            if type(password) is bool:
-                self.test_presence = True
-                self.presence = password
-                self.password = None
-            elif type(password) is unicode or type(password) is str:
-                self.test_presence = True
-                self.password = password
+
+            if vm:
+                self.password = vm.password
                 self.presence = True
 
-    def updateTemplate(self, value):
+            self.test_presence = self.presence is not None
+
+        def __str__(self):
+            if self.test_presence:
+                return "PasswordTest(presence=%s, password=%s)" % \
+                       (self.presence, self.password)
+            else:
+                return "NoPasswordTest()"
+
+    def __init__(self):
+        self.offering = None
+        self.vpc = None
+        self.vpc_acl_list = None
+        self.vpc_acl_rule = None
+
+    @contextmanager
+    def stopped_vm(self, vm):
+        was_running = (vm.state == VirtualMachine.RUNNING)
+
+        if was_running:
+            vm.stop(self.api_client)
+            vm.state = VirtualMachine.STOPPED
+
+        yield
+
+        if was_running:
+            vm.start(self.api_client)
+            vm.state = VirtualMachine.RUNNING
+            vm.ssh_client = None
+
+    def update_template(self, **kwargs):
         """Updates value of the guest VM template's password enabled setting
+        :param passwordenabled:
+        :type passwordenabled: bool
         """
         self.debug("Updating value of guest VM template's password enabled "
                    "setting")
-        cmd = updateTemplate.updateTemplateCmd()
-        cmd.id = self.template.id
-        cmd.passwordenabled = value
-        self.api_client.updateTemplate(cmd)
-        list_template_response = list_templates(self.api_client,
-                                                templatefilter="all",
-                                                id=self.template.id
-                                                )
-        self.template = list_template_response[0]
+        if not isinstance(self.template, Template):
+            self.template = Template(self.template.__dict__)
+
+        Template.update(self.template, self.api_client, **kwargs)
+        response = Template.list(self.api_client,
+                                 templatefilter="all",
+                                 id=self.template.id)
+        self.template = Template(response[0].__dict__)
         self.debug("Updated guest VM template")
 
     def get_userdata_url(self, vm):
@@ -239,45 +286,78 @@ def get_userdata_url(self, vm):
         user_data_url = 'curl "http://' + gateway + ':80/latest/user-data"'
         return user_data_url
 
-    def validate_firewall_rule(self, fw_rule):
-        pass
+    def generate_ssh_keys(self):
+        """Generates ssh key pair
 
-    def validate_StaticNat_rule_For_VM(self, public_ip, network, vm):
-        self.validate_PublicIPAddress(
-            public_ip, network, static_nat=True, vm=vm)
+        Writes the private key into a temp file and returns the file name
 
-    def create_and_verify_fip_and_fw(self, vm, public_ip, network):
+        :returns: generated keypair
+        :rtype: MySSHKeyPair
         """
-        Creates and verifies (Ingress) firewall rule
-        with a Static NAT rule enabled public IP"""
+        self.keypair = MySSHKeyPair.create(
+            self.api_client,
+            name=random_gen() + ".pem",
+            account=self.account.user[0].account,
+            domainid=self.account.domainid)
 
-        self.debug("Creating and verifying firewall rule")
-        self.create_StaticNatRule_For_VM(vm, public_ip, network)
+        self.cleanup.append(self.keypair)
+        self.debug("Created keypair with name: %s" % self.keypair.name)
+        self.debug("Writing the private key to local file")
+        pkfile = tempfile.gettempdir() + os.sep + self.keypair.name
+        self.keypair.private_key_file = pkfile
+        self.tmp_files.append(pkfile)
+        self.debug("File path: %s" % pkfile)
+        with open(pkfile, "w+") as f:
+            f.write(self.keypair.privatekey)
+        os.chmod(pkfile, 0o400)
 
-        # Verification
-        self.validate_StaticNat_rule_For_VM(public_ip, network, vm)
+        return self.keypair
 
-        fw_rule = self.create_FirewallRule(public_ip, self.test_data["ingress_rule"])
-        self.validate_firewall_rule(fw_rule)
-        self.debug("Successfully created and verified firewall rule")
+    def validate_acl_rule(self, fw_rule):
+        pass
+
+    def validate_vm_networking(self, vm):
+        pass
+
+    def validate_network_networking(self, network, vpc):
+        pass
+
+    def validate_shared_networking(self, network, vm):
+        pass
 
-    def mount_config_drive(self, ssh):
+    def validate_StaticNat_rule_For_VM(self, public_ip, network, vm):
+        self.validate_PublicIPAddress(
+            public_ip, network, static_nat=True, vm=vm)
+
+    # =========================================================================
+    # ---             Config Drive Validation helper methods                ---
+    # =========================================================================
+
+    def _mount_config_drive(self, ssh):
         """
         This method is to verify whether configdrive iso
         is attached to vm or not
-        Returns mount path if config drive is attached else False
+        Returns mount path if config drive is attached else None
         """
         mountdir = "/root/iso"
-        cmd = "blkid -t LABEL='config-2' /dev/sr? /dev/hd? /dev/sd? /dev/xvd? -o device"
+        cmd = "blkid -t LABEL='config-2' " \
+              "/dev/sr? /dev/hd? /dev/sd? /dev/xvd? -o device"
         tmp_cmd = [
-            'bash -c "if [ ! -d /root/iso ] ; then mkdir /root/iso ; fi"',
-            "umount /root/iso"]
+            'bash -c "if [ ! -d {0} ]; then mkdir {0}; fi"'.format(mountdir),
+            "umount %s" % mountdir]
+        self.debug("Unmounting drive from %s" % mountdir)
         for tcmd in tmp_cmd:
             ssh.execute(tcmd)
+
+        self.debug("Trying to find ConfigDrive device")
         configDrive = ssh.execute(cmd)
+        if not configDrive:
+            self.warn("ConfigDrive is not attached")
+            return None
+
         res = ssh.execute("mount {} {}".format(str(configDrive[0]), mountdir))
         if str(res).lower().find("mounting read-only") > -1:
-            self.debug("configDrive iso is mounted at location %s" % mountdir)
+            self.debug("ConfigDrive iso is mounted at location %s" % mountdir)
             return mountdir
         else:
             return None
@@ -307,7 +387,7 @@ def _get_config_drive_data(self, ssh, file, name, fail_on_missing=True):
 
         return content
 
-    def verify_config_drive_data(self, ssh, file, expected_content, name):
+    def _verify_config_drive_data(self, ssh, file, expected_content, name):
         """Verifies that the file contains the expected content
 
         :param ssh: SSH connection to the VM
@@ -328,65 +408,93 @@ def verify_config_drive_data(self, ssh, file, expected_content, name):
                          'Userdata found: %s is not equal to expected: %s'
                          % (actual_content, expected_content))
 
-    def verifyUserData(self, ssh, iso_path, userdata):
+    def _verify_userdata(self, ssh, mount_path, userdata):
         """
         verify Userdata
 
         :param ssh: SSH connection to the VM
-        :param iso_path: mount point of the config drive
+        :param mount_path: mount point of the config drive
         :param userdata: Expected userdata
         :type ssh: marvin.sshClient.SshClient
-        :type iso_path: str
+        :type mount_path: str
         :type userdata: str
         """
-        self.verify_config_drive_data(
+        self._verify_config_drive_data(
             ssh,
-            iso_path + "/cloudstack/userdata/user_data.txt",
+            mount_path + "/cloudstack/userdata/user_data.txt",
             userdata,
             "userdata (ACS)"
         )
 
-    def verifyOpenStackUserData(self, ssh, iso_path, userdata):
+    def _verify_openstack_userdata(self, ssh, mount_path, userdata):
         """
         verify Userdata in Openstack format
 
         :param ssh: SSH connection to the VM
-        :param iso_path: mount point of the config drive
+        :param mount_path: mount point of the config drive
         :param userdata: Expected userdata
         :type ssh: marvin.sshClient.SshClient
-        :type iso_path: str
+        :type mount_path: str
         :type userdata: str
         """
-        self.verify_config_drive_data(
+        self._verify_config_drive_data(
             ssh,
-            iso_path + "/openstack/latest/user_data",
+            mount_path + "/openstack/latest/user_data",
             userdata,
             "userdata (Openstack)"
         )
 
-    def verifyPassword(self, ssh, iso_path, password):
-        self.debug("Expected VM password is %s " % password.password)
-        password_file = iso_path + "/cloudstack/password/vm_password.txt"
+    def _verifyPassword(self, ssh, mount_path, password_test):
+        """
+        Verify Password
+        :param ssh: SSH connection to the VM
+        :param mount_path: Mount path of the config drive disk
+        :param password_test: expected Password behavior
+        :type ssh: marvin.sshClient.SshClient
+        :type mount_path: str
+        :type password_test: ConfigDriveUtils.PasswordTest
+        """
+
+        if not password_test.test_presence:
+            return
+
+        if password_test.password is not None:
+            self.debug("Expected VM password is %s " % password_test.password)
+
+        password_file = mount_path + "/cloudstack/password/vm_password.txt"
         vmpassword = self._get_config_drive_data(ssh, password_file,
                                                  "ConfigDrive password",
                                                  fail_on_missing=False)
 
-        self.debug("ConfigDrive password is %s " % vmpassword)
+        password_found = NO_SUCH_FILE not in vmpassword
 
-        if NO_SUCH_FILE in vmpassword:
-            self.debug("Password file is not found")
-            return False, False
-        elif (password.password is not None) \
-                and (password.password in vmpassword):
-            self.debug("Expected Password is found in configDriveIso")
-            return True, True
-        else:
-            self.debug("Expected password is not found in configDriveIso")
-            return True, False
+        self.assertEqual(password_found, password_test.presence,
+                         "Expected is that password is present: %s "
+                         " but found is: %s"
+                         % (password_test.presence, password_found))
+
+        if password_test.password is not None:
+            self.debug("ConfigDrive password is %s " % vmpassword)
+            self.debug("Expected Password for vm is %s" %
+                       password_test.password)
+            self.assertTrue(password_test.password in vmpassword,
+                            "Password value test failed, expected %s, was %s" %
+                            (password_test.password, vmpassword))
+
+    def _verify_ssh_key(self, ssh, mount_path, ssh_key):
+        """
+        Verify SSH Key
+        :param ssh: SSH connection to the VM
+        :param mount_path: Mount path of the config drive disk
+        :param ssh_key: expected SSH key
+        :type ssh: marvin.sshClient.SshClient
+        :type mount_path: str
+        :type ssh_key: MySSHKeyPair
+        """
 
-    def verifySshKey(self, ssh, iso_path, ssh_key):
-        self.debug("Expected VM sshkey is %s " % ssh_key.name)
-        publicKey_file = iso_path + "/cloudstack/metadata/public-keys.txt"
+        self.debug("Fingerprint of Expected sshkey %s is %s " %
+                   (ssh_key.name, ssh_key.fingerprint))
+        publicKey_file = mount_path + "/cloudstack/metadata/public-keys.txt"
         cmd = "ssh-keygen -lf %s | cut -f2 -d' '" % publicKey_file
         res = ssh.execute(cmd)
         vmsshkey = str(res[0])
@@ -402,19 +510,19 @@ def verifySshKey(self, ssh, iso_path, ssh_key):
             "Fingerprint of authorized key does not match ssh key fingerprint"
         )
 
-    def verifyMetaData(self, vm, ssh, iso_path):
+    def _verify_metadata(self, vm, ssh, mount_path):
         """
         verify metadata files in CloudStack format
 
         :param vm: the VM
         :param ssh: SSH connection to the VM
-        :param iso_path: mount point of the config drive
+        :param mount_path: mount point of the config drive
         :type vm: VirtualMachine
         :type ssh: marvin.sshClient.SshClient
-        :type iso_path: str
+        :type mount_path: str
         """
 
-        metadata_dir = iso_path + "/cloudstack/metadata/"
+        metadata_dir = mount_path + "/cloudstack/metadata/"
         vm_files = ["availability-zone.txt",
                     "service-offering.txt",
                     "instance-id.txt",
@@ -423,13 +531,15 @@ def verifyMetaData(self, vm, ssh, iso_path):
                     "local-ipv4.txt",
                     "public-ipv4.txt"]
 
-        get_name = lambda file: \
-            "{} metadata".format(file.split('.'[-1].replace('-', ' ')))
+        def get_name(vm_file):
+            return "{} metadata".format(
+                vm_file.split('.'[-1].replace('-', ' '))
+            )
 
         metadata = {vm_file:
-                        self._get_config_drive_data(ssh,
-                                                    metadata_dir + vm_file,
-                                                    get_name(vm_file))
+                    self._get_config_drive_data(ssh,
+                                                metadata_dir + vm_file,
+                                                get_name(vm_file))
                     for vm_file in vm_files}
 
         self.assertEqual(
@@ -439,7 +549,7 @@ def verifyMetaData(self, vm, ssh, iso_path):
         )
         self.assertEqual(
             str(metadata["local-hostname.txt"]),
-            vm.instancename,
+            vm.name,
             "vm name inside metadata does not match with the "
             "instance name"
         )
@@ -463,16 +573,17 @@ def verifyMetaData(self, vm, ssh, iso_path):
         )
         return
 
-    def verifyOpenStackData(self, ssh, iso_path):
+    def _verify_openstack_metadata(self, ssh, mount_path):
         """
-        verify existence of metadata and user data files in OpenStack format
+        verify existence of metadata and user data files
+        in OpenStack format
 
         :param ssh: SSH connection to the VM
-        :param iso_path: mount point of the config drive
+        :param mount_path: mount point of the config drive
         :type ssh: marvin.sshClient.SshClient
-        :type iso_path: str
+        :type mount_path: str
         """
-        openstackdata_dir = iso_path + "/openstack/latest/"
+        openstackdata_dir = mount_path + "/openstack/latest/"
         openstackdata_files = ["meta_data.json",
                                "vendor_data.json",
                                "network_data.json"]
@@ -481,51 +592,42 @@ def verifyOpenStackData(self, ssh, iso_path):
             if NO_SUCH_FILE in res[0]:
                 self.fail("{} file not found in vm openstack".format(file))
 
-    def generate_ssh_keys(self):
-        """Generates ssh key pair
-
-        Writes the private key into a temp file and returns the file name
-
-        :returns: path of the private key file
-
-        """
-        self.keypair = MySSHKeyPair.create(
-            self.api_client,
-            name=random_gen() + ".pem",
-            account=self.account.user[0].account,
-            domainid=self.account.domainid)
-
-        self.cleanup.append(self.keypair)
-        self.debug("Created keypair with name: %s" % self.keypair.name)
-        self.debug("Writing the private key to local file")
-        pkfile = tempfile.gettempdir() + os.sep + self.keypair.name
-        self.keypair.private_key_file = pkfile
-        self.tmp_files.append(pkfile)
-        self.debug("File path: %s" % pkfile)
-        with open(pkfile, "w+") as f:
-            f.write(self.keypair.privatekey)
-        os.chmod(pkfile, 0o400)
-
-        return self.keypair
-
-    def umount_config_drive(self, ssh, iso_path):
+    def _umount_config_drive(self, ssh, mount_path):
         """unmount config drive inside guest vm
 
         :param ssh: SSH connection to the VM
-        :param iso_path: mount point of the config drive
+        :param mount_path: mount point of the config drive
         :type ssh: marvin.sshClient.SshClient
-        :type iso_path: str
+        :type mount_path: str
         """
-        ssh.execute("umount -d %s" % iso_path)
+        ssh.execute("umount -d %s" % mount_path)
         # Give the VM time to unlock the iso device
         time.sleep(2)
         # Verify umount
-        result = ssh.execute("ls %s" % iso_path)
+        result = ssh.execute("ls %s" % mount_path)
         self.assertTrue(len(result) == 0,
                         "After umount directory should be empty "
                         "but contains: %s" % result)
 
-    def update_provider_state(self, new_state):
+    # =========================================================================
+    # ---                    Gherkin style helper methods                   ---
+    # =========================================================================
+
+    def given_template_password_enabled_is(self, new_state):
+        """Updates value of the guest VM template's password enabled setting
+        :param new_state:
+        :type new_state: bool
+        """
+        orig_state = self.template.passwordenabled
+        self.debug("Updating guest VM template to password enabled "
+                   "from %s to %s" % (orig_state, new_state))
+        if orig_state != new_state:
+            self.update_template(passwordenabled=new_state)
+        self.assertEqual(self.template.passwordenabled, new_state,
+                         "Guest VM template is not password enabled")
+        return orig_state
+
+    def given_config_drive_provider_is(self, new_state):
         """
         Enables or disables the ConfigDrive Service Provider
 
@@ -543,9 +645,223 @@ def update_provider_state(self, new_state):
         self.validate_NetworkServiceProvider("ConfigDrive", state=new_state)
         return orig_state
 
+    def given_a_network_offering(self, offering_name):
+        self.offering = self.create_NetworkOffering(self._get_test_data(
+            offering_name))
+
+    def given_a_network_offering_with_configdrive(self):
+        self.given_a_network_offering(self.get_network_offering_name())
+
+    def given_a_network_offering_for_vpc_with_configdrive(self):
+        self.given_a_network_offering(self.get_network_offering_name_for_vpc())
+
+    def given_a_vpc_with_offering(self, offering_name):
+        self.given_config_drive_provider_is("Enabled")
+        create_vpc = self.verify_vpc_creation(
+            offering_name=offering_name)
+        self.assertTrue(create_vpc.success,
+                        "Vpc found success = %s, expected success = %s"
+                        % (str(create_vpc.success), 'True'))
+        self.vpc_acl_list = self.create_NetworkAclList(
+            name="acl", description="acl", vpc=create_vpc.vpc)
+        self.vpc_acl_rule = self.create_NetworkAclRule(
+            self.test_data["ingress_rule"], acl_list=self.vpc_acl_list)
+
+        self.vpc = create_vpc.vpc
+
+    def given_a_vpc(self):
+        self.given_a_vpc_with_offering(self.get_vpc_offering_name())
+
+    def when_I_create_a_network_with_that_offering(self, gateway='10.1.1.1'):
+        return self.verify_network_creation(
+            offering=self.offering,
+            gateway=gateway)
+
+    def when_I_create_a_vpc_tier_with_that_offering(self, gateway='10.1.1.1'):
+        return self.verify_network_creation(
+            offering=self.offering,
+            gateway=gateway,
+            vpc=self.vpc,
+            acl_list=self.vpc_acl_list)
+
+    def when_I_restart_the_vpc_with(self, cleanup=True):
+        self.restart_Vpc(self.vpc, cleanup=cleanup)
+        self.validate_vpc(self.vpc, state="Enabled")
+
+    def when_I_restart_the_network_with(self, network, cleanup):
+        network.restart(self.api_client, cleanup=cleanup)
+        self.validate_Network(network, state="Implemented")
+
+    def when_I_deploy_a_vm(self, networks, acl_item=None,
+                           vpc=None, keypair=None):
+
+        test_data = self.test_data["virtual_machine_userdata"]
+
+        vm = self.create_VM(
+            networks,
+            testdata=test_data,
+            keypair=keypair)
+
+        # Check VM
+        self.check_VM_state(vm, state="Running")
+        self.validate_vm_networking(vm)
+
+        if keypair and vm.passwordenabled:
+            self._decrypt_password(vm)
+
+        vm.key_pair = self.keypair if keypair else None
+        vm.user_data = test_data["userdata"]
+        vm.password_test = self.PasswordTest(vm=vm) \
+            if vm.passwordenabled \
+            else self.PasswordTest(expect_pw=False)
+
+        # Check networks
+        network_list = networks \
+            if hasattr(networks, "__iter__") \
+            else [networks]
+
+        for network in network_list:
+            self.validate_Network(network, state="Implemented")
+            if network.type == "Shared":
+                self.validate_shared_networking(network, self.vpc)
+            else:
+                self.validate_network_networking(network, self.vpc)
+
+        if self.vpc_acl_rule is not None:
+            self.validate_acl_rule(self.vpc_acl_rule)
+
+        return vm
+
+    def when_I_deploy_a_vm_with_keypair_in(self, network):
+        return self.when_I_deploy_a_vm(network,
+                                       keypair=self.keypair.name)
+
+    def when_I_create_a_static_nat_ip_to(self, vm, network, public_ip=None):
+        """
+        Creates and verifies (Ingress) firewall rule
+        with a Static NAT rule enabled public IP
+        :type vm: VirtualMachine
+        :type network: Network
+        :type public_ip: PublicIPAddress
+        :rtype: PublicIPAddress
+        """
+
+        if not public_ip:
+            public_ip = self.acquire_PublicIPAddress(network, vpc=self.vpc)
+
+        self.debug("Creating and verifying firewall rule")
+        self.create_StaticNatRule_For_VM(vm, public_ip, network)
+
+        # Verification
+        self.validate_StaticNat_rule_For_VM(public_ip, network, vm)
+
+        if not self.vpc:
+            fw_rule = self.create_FirewallRule(public_ip,
+                                               self.test_data["ingress_rule"])
+            self.validate_acl_rule(fw_rule)
+            self.debug("Successfully created and verified firewall rule")
+
+        self.cleanup.append(public_ip)
+
+        return public_ip
+
+    def then_creating_a_network_with_that_offering_fails(self):
+        create_network = self.verify_network_creation(
+            offering=self.offering,
+            gateway='10.6.6.6')
+        self.assertFalse(create_network.success,
+                         'Network found success = %s, expected success =%s'
+                         % (str(create_network.success), 'False'))
+
+    def then_creating_a_vpc_tier_with_that_offering_fails(self):
+        create_network = self.verify_network_creation(
+            offering=self.offering,
+            gateway='10.6.6.6',
+            vpc=self.vpc,
+            acl_list=self.vpc_acl_list)
+        self.assertFalse(create_network.success,
+                         'Create Tier success = %s, expected success =%s'
+                         % (str(create_network.success), 'False'))
+
+    def then_the_network_has(self, network_result, state):
+        self.validate_Network(network_result.network, state=state)
+
+    def then_the_network_is_successfully_created(self, network):
+        self.assertTrue(network.success,
+                        'Network found success = %s, expected success = %s'
+                        % (str(network.success), 'True'))
+
+    def then_vr_is_as_expected(self, network):
+        self.check_Router_state(network=network, state="Running")
+
+    def then_config_drive_is_as_expected(self, vm,
+                                         public_ip,
+                                         metadata=False,
+                                         reconnect=True):
+        """Verify Config Drive Content
+
+        :param vm: Virtual Machine
+        :param public_ip: public IP
+        :param metadata: whether to verify metadata
+        :param reconnect: recreate SSH Connection
+        :type vm: VirtualMachine
+        :type public_ip: PublicIPAddress
+        :type metadata: bool
+        :type reconnect: bool
+        """
+
+        if self.isSimulator:
+            self.debug("Simulator Environment: "
+                       "Skipping Config Drive content verification")
+            return
+
+        self.debug("SSHing into the VM %s" % vm.name)
+
+        ssh = self.ssh_into_VM(vm, public_ip, reconnect=reconnect)
+
+        d = {x.name: x for x in ssh.logger.handlers}
+        ssh.logger.handlers = list(d.values())
+
+        mount_path = self._mount_config_drive(ssh)
+        self.assertIsNotNone(mount_path,
+                             'ConfigdriveIso is not attached to vm')
+        if metadata:
+            self.debug("Verifying metadata for vm: %s" % vm.name)
+            self._verify_metadata(vm, ssh, mount_path)
+            self.debug("Verifying openstackdata for vm: %s" % vm.name)
+            self._verify_openstack_metadata(ssh, mount_path)
+
+        if hasattr(vm, "user_data") and vm.user_data is not None:
+            self.debug("Verifying userdata for vm: %s" % vm.name)
+            self._verify_userdata(ssh, mount_path, vm.user_data)
+            self._verify_openstack_userdata(ssh, mount_path, vm.user_data)
+
+        if hasattr(vm, "password_test") \
+                and vm.password_test is not None \
+                and vm.password_test.test_presence:
+            self.debug("Verifying password for vm: %s" % vm.name)
+            self._verifyPassword(ssh, mount_path, vm.password_test)
+
+        if hasattr(vm, "key_pair") and vm.key_pair is not None:
+            self.debug("Verifying sshkey for vm: %s" % vm.name)
+            self._verify_ssh_key(ssh, mount_path, vm.key_pair)
+
+        self._umount_config_drive(ssh, mount_path)
+
+    # =========================================================================
+
     def _get_test_data(self, key):
         return self.test_data[key]
 
+    def get_vpc_offering_name(self):
+        return "vpc_offering_configdrive"
+
+    def get_network_offering_name(self):
+        return "isolated_configdrive_network_offering"
+
+    def get_network_offering_name_for_vpc(self):
+        return "vpc_network_offering_configdrive"
+
     def get_configdrive_provider(self):
         return NetworkServiceProvider.list(
             self.api_client,
@@ -571,10 +887,10 @@ def verify_network_creation(self, offering=None,
         :param testdata: Test data
         :type testdata: dict
         :return: Network Creation Result
-        :rtype: CreateResult
+        :rtype: ConfigDriveUtils.CreateResult
         """
         if offering is None:
-            self.debug("Creating Nuage VSP network offering...")
+            self.debug("Creating network offering...")
             offering = self.create_NetworkOffering(
                 self._get_test_data(offering_name))
             self.validate_NetworkOffering(offering, state="Enabled")
@@ -585,8 +901,8 @@ def verify_network_creation(self, offering=None,
                                           acl_list=acl_list,
                                           testdata=testdata)
             return self.CreateResult(True, offering=offering, network=network)
-        except Exception:
-            self.debug("Exception: %s" % sys.exc_info()[0])
+        except Exception as e:
+            self.debug("Exception: %s" % e)
             return self.CreateResult(False, offering=offering)
 
     def verify_vpc_creation(self, offering=None, offering_name=None):
@@ -601,194 +917,154 @@ def verify_vpc_creation(self, offering=None, offering_name=None):
         :rtype: CreateResult
         """
         if offering is None:
-            self.debug("Creating Nuage VSP VPC offering...")
+            self.debug("Creating VPC offering...")
             offering = self.create_VpcOffering(
                 self._get_test_data(offering_name))
             self.validate_VpcOffering(offering, state="Enabled")
         try:
-            vpc = self.create_Vpc(offering, cidr='10.1.0.0/16')
-            self.validate_Vpc(vpc, state="Enabled")
+            vpc = self.create_vpc(offering, cidr='10.1.0.0/16')
+            self.validate_vpc(vpc, state="Enabled")
             return self.CreateResult(True, offering=offering, vpc=vpc)
-        except Exception:
+        except Exception as e:
+            self.debug("Exception: %s" % e)
             return self.CreateResult(False, offering=offering)
 
-    def update_password_enable_in_template(self, new_state):
-        self.debug("Updating guest VM template to password %s" % new_state)
-        orig_state = self.template.passwordenabled
-        if self.template.passwordenabled is not new_state:
-            self.updateTemplate(new_state)
-        self.assertEqual(self.template.passwordenabled, new_state,
-                         "Guest VM template is not password enabled")
-        return orig_state
-
-    def verify_config_drive_content(self, vm,
-                                    public_ip,
-                                    password_test,
-                                    userdata=None,
-                                    metadata=False,
-                                    ssh_key=None,
-                                    ssh_client=None):
-        """Verify Config Drive Content
-
-        :param vm:
-        :param public_ip:
-        :param password_test:
-        :param userdata:
-        :param metadata:
-        :param ssh_key:
-        :param ssh_client: SSH Connection
-        :type vm:
-        :type public_ip:
-        :type password_test:
-        :type userdata: object
-        :type metadata:
-        :type ssh_key:
-        :type ssh_client:
-        :return: SSH Connection
-        """
+    def _find_nic(self, vm, network):
+        vm = VirtualMachine.list(self.api_client, id=vm.id)[0]
+        return next(nic for nic in vm.nic if nic.networkid == network.id)
 
-        if self.isSimulator:
-            self.debug(
-                "Simulator Environment: Skipping Config Drive content verification")
-            return
+    def get_public_shared_ip(self, vm, network):
+        nic = self._find_nic(vm, network)
+        return PublicIPAddress({"ipaddress": nic})
 
-        self.debug("SSHing into the VM %s" % vm.name)
-        if ssh_client is None:
-            ssh = self.ssh_into_VM(vm, public_ip)
-        else:
-            ssh = ssh_client
-        d = {x.name: x for x in ssh.logger.handlers}
-        ssh.logger.handlers = list(d.values())
-        config_drive_path = self.mount_config_drive(ssh)
-        self.assertIsNotNone(config_drive_path,
-                             'ConfigdriveIso is not attached to vm')
-        if metadata:
-            self.debug("Verifying metadata for vm: %s" % vm.name)
-            self.verifyMetaData(vm, ssh, config_drive_path)
-            self.debug("Verifying openstackdata for vm: %s" % vm.name)
-            self.verifyOpenStackData(ssh, config_drive_path)
-
-        if userdata is not None:
-            self.debug("Verifying userdata for vm: %s" % vm.name)
-            self.verifyUserData(ssh, config_drive_path, userdata)
-            self.verifyOpenStackUserData(ssh, config_drive_path, userdata)
+    def plug_nic(self, vm, network):
+        vm.add_nic(self.api_client, network.id)
+        self.debug("Added NIC in VM with ID - %s and network with ID - %s"
+                   % (vm.id, network.id))
 
-        if password_test.test_presence:
-            self.debug("Verifying password for vm: %s" % vm.name)
-            test_result = self.verifyPassword(ssh, config_drive_path,
-                                              password_test)
-            self.assertEqual(test_result[0], password_test.presence,
-                             "Expected is that password is present: %s "
-                             " but found is: %s"
-                             % (test_result[0], password_test.presence))
+    def unplug_nic(self, vm, network):
+        nic = self._find_nic(vm, network)
+        vm.remove_nic(self.api_client, nic.id)
+        self.debug("Removed NIC with ID - %s in VM with ID - %s and "
+                   "network with ID - %s" % (nic.id, vm.id, network.id))
 
-        if password_test.password is not None:
-            self.debug("Password for vm is %s" % password_test.password)
-            self.assertEqual(test_result[1], True,
-                             "Password value test failed.")
-        if ssh_key is not None:
-            self.debug("Verifying sshkey for vm: %s" % vm.name)
-            self.verifySshKey(ssh, config_drive_path, ssh_key)
+    def update_default_nic(self, vm, network):
+        nic = self._find_nic(vm, network)
+        vm.update_default_nic(self.api_client, nic.id)
+        self.debug("Removed NIC with ID - %s in VM with ID - %s and "
+                   "network with ID - %s" % (nic.id, vm.id, network.id))
 
-        self.umount_config_drive(ssh, config_drive_path)
-        return ssh
-
-    def create_guest_vm(self, networks, acl_item=None,
-                        vpc=None, keypair=None):
-        vm = self.create_VM(
-            networks,
-            testdata=self.test_data["virtual_machine_userdata"],
-            keypair=keypair)
-        # Check VM
-        self.check_VM_state(vm, state="Running")
-
-        if keypair:
-            self.decrypt_password(vm)
-
-        # Check networks
-        network_list = []
-        if isinstance(networks, list):
-            for network in networks:
-                network_list.append(network)
-        else:
-            network_list.append(networks)
+    def when_I_update_userdata(self, vm, new_user_data):
+        """Updates the user data of a VM
 
-        for network in network_list:
-            self.validate_Network(network, state="Implemented")
+        :param public_ip: Public ip of the VM
+        :param vm: the Virtual Machine
+        :param new_user_data: UserData to set
+        :type public_ip: PublicIPAddress
+        :type vm: VirtualMachine
+        :type new_user_data: str
+        :returns: User data in base64 format
+        :rtype: str
+        """
+        self.debug("Updating userdata for VM - %s" % vm.name)
+        updated_user_data = base64.b64encode(new_user_data)
+        with self.stopped_vm(vm):
+            vm.update(self.api_client, userdata=updated_user_data)
 
-        if acl_item is not None:
-            self.validate_firewall_rule(acl_item)
-        return vm
+        vm.user_data = new_user_data
+        if vm.state == VirtualMachine.RUNNING:
+            vm.password_test = ConfigDriveUtils.PasswordTest(expect_pw=False)
 
-    # nic_operation_VM - Performs NIC operations such as add, remove, and
-    # update default NIC in the given VM and network
-    def nic_operation_VM(self, vm, network, operation="add"):
-        self.debug("Performing %s NIC operation in VM with ID - %s and "
-                   "network with ID - %s" % (operation, vm.id, network.id))
-        if operation is "add":
-            vm.add_nic(self.api_client, network.id)
-            self.debug("Added NIC in VM with ID - %s and network with ID - %s"
-                       % (vm.id, network.id))
-        vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0]
-        for nic in vm_info.nic:
-            if nic.networkid == network.id:
-                nic_id = nic.id
-        if operation is "update":
-            vm.update_default_nic(self.api_client, nic_id)
-            self.debug("Updated default NIC to NIC with ID- %s in VM with ID "
-                       "- %s and network with ID - %s" %
-                       (nic_id, vm.id, network.id))
-        if operation is "remove":
-            vm.remove_nic(self.api_client, nic_id)
-            self.debug("Removed NIC with ID - %s in VM with ID - %s and "
-                       "network with ID - %s" % (nic_id, vm.id, network.id))
-
-    def update_userdata(self, vm, new_user_data):
+    def update_and_validate_userdata(self, vm, new_user_data, public_ip=None,
+                                     **kwargs):
         """Updates the user data of a VM
 
+        :param public_ip: Public ip of the VM
         :param vm: the Virtual Machine
         :param new_user_data: UserData to set
+        :type public_ip: PublicIPAddress
         :type vm: VirtualMachine
         :type new_user_data: str
         :returns: User data in base64 format
         :rtype: str
         """
-        updated_user_data = base64.b64encode(new_user_data)
-        vm.update(self.api_client, userdata=updated_user_data)
-        return new_user_data
 
-    def reset_password(self, vm):
-        vm.password = vm.resetPassword(self.api_client)
+        self.when_I_update_userdata(vm, new_user_data)
+        self.then_config_drive_is_as_expected(vm, public_ip, **kwargs)
+
+    def when_I_reset_the_password(self, vm):
+        """Resets the password of a VM
+
+        :param vm: the Virtual Machine
+        :type vm: VirtualMachine
+        :returns: The new password
+        :rtype: str
+        """
+
+        self.debug("Resetting password for VM - %s" % vm.name)
+        with self.stopped_vm(vm):
+            vm.password = vm.resetPassword(self.api_client)
+
         self.debug("Password reset to - %s" % vm.password)
         self.debug("VM - %s password - %s !" %
                    (vm.name, vm.password))
 
+        vm.password_test = ConfigDriveUtils.PasswordTest(vm=vm)
+
+        return vm.password
+
+    def stop_and_start_vm(self, vm):
+        self.debug("+++ Verify userdata after stopstartVM - %s" % vm.name)
+        with self.stopped_vm(vm):
+            pass
+
+        vm.password_test = ConfigDriveUtils.PasswordTest(expect_pw=False)
+
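The "with self.stopped_vm(vm): pass" idiom above stops the VM, runs the (empty) body, and starts the VM again on exit, giving a plain stop/start cycle. The stopped_vm helper itself is defined in a part of the patch not shown here; a minimal sketch of such a context manager, with names and details assumed rather than taken from the patch, could look like:

    from contextlib import contextmanager

    @contextmanager
    def stopped_vm(api_client, vm):
        # Assumed shape of the helper: stop the VM, hand control back to the
        # caller, then start it again even if the body raised. The patch's own
        # helper is a method on ConfigDriveUtils and may differ in detail.
        vm.stop(api_client)
        try:
            yield vm
        finally:
            vm.start(api_client)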
+    def delete_and_recover_vm(self, vm):
+        self.debug("+++ Verify userdata after VM recover- %s" % vm.name)
+        vm.delete(self.api_client, expunge=False)
+
+        self.debug("Recover VM - %s" % vm.name)
+        vm.recover(self.api_client)
+        vm.start(self.api_client)
+        vm.state = VirtualMachine.RUNNING
+
+        vm.password_test = ConfigDriveUtils.PasswordTest(expect_pw=False)
+
     def wait_until_done(self, thread_list, name):
         for aThread in thread_list:
             self.debug("[Concurrency]Join %s for vm %s" % (name,
                                                            aThread.get_vm()))
             aThread.join()
 
-    def update_sshkeypair(self, vm):
+    def update_and_validate_sshkeypair(self, vm, public_ip=None):
         """
 
         :type vm: VirtualMachine
         """
-        vm.stop(self.api_client)
-        vm_new_ssh = vm.resetSshKey(self.api_client,
-                       keypair=self.keypair.name,
-                       account=self.account.user[0].account,
-                       domainid=self.account.domainid)
+
+        self.generate_ssh_keys()
+
+        with self.stopped_vm(vm):
+            vm_new_ssh = vm.resetSshKey(self.api_client,
+                                        keypair=self.keypair.name,
+                                        account=self.account.user[0].account,
+                                        domainid=self.account.domainid)
 
         self.debug("Sshkey reset to - %s" % self.keypair.name)
-        vm.start(self.api_client)
 
         vm.details = vm_new_ssh.details
 
         # reset SSH key also resets the password.
-        self.decrypt_password(vm)
+        self._decrypt_password(vm)
+
+        vm.password_test = ConfigDriveUtils.PasswordTest(vm=vm)
+        vm.key_pair = self.keypair
 
-    def decrypt_password(self, vm):
+        if public_ip:
+            self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+    def _decrypt_password(self, vm):
         """Decrypt VM password
 
         the new password is available in VM detail,
@@ -798,23 +1074,37 @@ def decrypt_password(self, vm):
 
         :type vm: VirtualMachine
         """
-        try:
+        password_ = vm.details['Encrypted.Password']
+        if password_ is not None:
             from base64 import b64decode
-            from Crypto.PublicKey import RSA
-            from Crypto.Cipher import PKCS1_v1_5
-            with open(self.keypair.private_key_file, "r") as pkfile:
-                key = RSA.importKey(pkfile.read())
-                cipher = PKCS1_v1_5.new(key)
-            new_password = cipher.decrypt(
-                b64decode(vm.details['Encrypted.Password']), None)
-            if new_password:
-                vm.password = new_password
-            else:
-                self.debug("Failed to decrypt new password")
-        except:
-            self.debug("Failed to decrypt new password")
+            try:
+                from Crypto.PublicKey import RSA
+                from Crypto.Cipher import PKCS1_v1_5
+                with open(self.keypair.private_key_file, "r") as pkfile:
+                    key = RSA.importKey(pkfile.read())
+                    cipher = PKCS1_v1_5.new(key)
+                new_password = cipher.decrypt(b64decode(password_), None)
+                if new_password:
+                    vm.password = new_password
+                else:
+                    self.fail("Failed to decrypt new password")
+            except ImportError:
+                # No pycrypto, fall back to the openssl CLI
+                cmd = ("echo " + password_ +
+                       " | base64 -d"
+                       " | openssl rsautl -decrypt -inkey "
+                       + self.keypair.private_key_file
+                       + " 2> /dev/null")
+
+                new_password = subprocess.check_output(cmd, shell=True)
+                self.debug("Decrypted password %s" % new_password)
+                if new_password:
+                    vm.password = new_password
+                else:
+                    self.fail("Failed to decrypt new password")
 
-    def add_subnet_verify(self, network, services):
+    def add_subnet_to_shared_network_and_verify(self, network, services):
         """verify required nic is present in the VM"""
 
         self.debug("Going to add new ip range in shared network %s" %
@@ -843,15 +1133,28 @@ def add_subnet_verify(self, network, services):
         )
         return addedsubnet
 
-    def ssh_into_VM(self, vm, public_ip, keypair):
+    def ssh_into_VM(self, vm, public_ip, reconnect=True, keypair=None):
         pass
 
+    def delete(self, obj, **kwargs):
+        if isinstance(obj, VirtualMachine) and "expunge" not in kwargs:
+            kwargs["expunge"] = True
+
+        obj.delete(self.api_client, **kwargs)
+
+        if obj in self.cleanup:
+            self.cleanup.remove(obj)
+
 
 class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
     """Test user data and password reset functionality
     using configDrive
     """
 
+    def __init__(self, methodName='runTest'):
+        super(TestConfigDrive, self).__init__(methodName)
+        ConfigDriveUtils.__init__(self)
+
     @classmethod
     def setUpClass(cls):
         # We want to fail quicker, if it's a failure
@@ -861,17 +1164,19 @@ def setUpClass(cls):
         cls.api_client = test_client.getApiClient()
         cls.db_client = test_client.getDbConnection()
         cls.test_data = test_client.getParsedTestDataConfig()
+        cls.test_data.update(Services().services)
 
         # Get Zone, Domain and templates
         cls.zone = get_zone(cls.api_client)
         cls.domain = get_domain(cls.api_client)
-        cls.template = get_template(cls.api_client,
-                                    cls.zone.id,
-                                    cls.test_data["ostype"]
-                                    )
+
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
+        cls.template = get_test_template(cls.api_client,
+                                         cls.zone.id,
+                                         cls.hypervisor,
+                                         cls.test_data["test_templates"])
         cls.test_data["virtual_machine"]["zoneid"] = cls.zone.id
         cls.test_data["virtual_machine"]["template"] = cls.template.id
-        cls.test_data.update(Services().services)
 
         # Create service offering
         cls.service_offering = ServiceOffering.create(
@@ -892,6 +1197,7 @@ def setUp(self):
                                       )
         self.tmp_files = []
         self.cleanup = [self.account]
+        self.generate_ssh_keys()
         return
 
     @classmethod
@@ -948,9 +1254,13 @@ def create_StaticNatRule_For_VM(self, vm, public_ip, network,
                    (public_ip.ipaddress.ipaddress, vm.id, network.id))
         return static_nat_rule
 
-    # create_FirewallRule - Creates (Ingress) Firewall rule on the given
-    # Static NAT rule enabled public IP for Isolated networks
     def create_FirewallRule(self, public_ip, rule=None):
+        """Creates an Ingress Firewall Rule on the given public IP
+           to allow traffic to a VM in an isolated network
+        :param public_ip: Static NAT rule enabled public IP
+        :param rule: (optional) Rule to add, defaults to test_data
+        :type public_ip: PublicIPAddress
+        """
         if not rule:
             rule = self.test_data["ingress_rule"]
         self.debug("Adding an (Ingress) Firewall rule to make Guest VMs "
@@ -964,10 +1274,10 @@ def create_FirewallRule(self, public_ip, rule=None):
                                    )
 
     # validate_NetworkServiceProvider - Validates the given Network Service
-    # Provider in the Nuage VSP Physical Network, matches the given provider
+    # Provider in the Physical Network, matches the given provider
     # name and state against the list of providers fetched
     def validate_NetworkServiceProvider(self, provider_name, state=None):
-        """Validates the Network Service Provider in the Nuage VSP Physical
+        """Validates the Network Service Provider in the Physical
         Network"""
         self.debug("Validating the creation and state of Network Service "
                    "Provider - %s" % provider_name)
@@ -1037,13 +1347,19 @@ def create_NetworkOffering(self, net_offering, suffix=None,
         # Enable Network offering
         nw_off.update(self.api_client, state="Enabled")
         self.debug("Created and Enabled Network offering")
+        self.cleanup.append(nw_off)
         return nw_off
 
     # validate_NetworkOffering - Validates the given Network offering, matches
     # the given network offering name and state against the list of network
     # offerings fetched
     def validate_NetworkOffering(self, net_offering, state=None):
-        """Validates the Network offering"""
+        """Validates the Network offering.
+        :param net_offering: Network Offering to validate
+        :param state: expected state
+        :type net_offering: NetworkOffering
+        :type state: str
+        """
         self.debug("Validating the creation and state of Network offering - %s"
                    % net_offering.name)
         net_offs = NetworkOffering.list(self.api_client,
@@ -1082,11 +1398,13 @@ def create_Network(self, nw_off, gateway="10.1.1.1",
                                  domainid=account.domainid,
                                  networkofferingid=nw_off.id,
                                  zoneid=self.zone.id,
-                                 vpcid=vpc.id if vpc else self.vpc.id
-                                 if hasattr(self, "vpc") else None,
+                                 vpcid=vpc.id if vpc
+                                 else self.vpc.id if self.vpc
+                                 else None,
                                  aclid=acl_list.id if acl_list else None
                                  )
         self.debug("Created network with ID - %s" % network.id)
+        self.cleanup.append(network)
         return network
 
     # create_VpcOffering - Creates VPC offering
@@ -1100,10 +1418,11 @@ def create_VpcOffering(self, vpc_offering, suffix=None):
         # Enable VPC offering
         vpc_off.update(self.api_client, state="Enabled")
         self.debug("Created and Enabled VPC offering")
+        self.cleanup.append(vpc_off)
         return vpc_off
 
     # create_Vpc - Creates VPC with the given VPC offering
-    def create_Vpc(self, vpc_offering, cidr='10.1.0.0/16', testdata=None,
+    def create_vpc(self, vpc_offering, cidr='10.1.0.0/16', testdata=None,
                    account=None, networkDomain=None):
         if not account:
             account = self.account
@@ -1122,6 +1441,7 @@ def create_Vpc(self, vpc_offering, cidr='10.1.0.0/16', testdata=None,
                          networkDomain=networkDomain
                          )
         self.debug("Created VPC with ID - %s" % vpc.id)
+        self.cleanup.append(vpc)
         return vpc
 
     # validate_VpcOffering - Validates the given VPC offering, matches the
@@ -1150,7 +1470,7 @@ def validate_VpcOffering(self, vpc_offering, state=None):
 
     # validate_Vpc - Validates the given VPC, matches the given VPC name and
     # state against the list of VPCs fetched
-    def validate_Vpc(self, vpc, state=None):
+    def validate_vpc(self, vpc, state=None):
         """Validates the VPC"""
         self.debug("Validating the creation and state of VPC - %s" % vpc.name)
         vpcs = VPC.list(self.api_client,
@@ -1191,7 +1511,7 @@ def retry_ssh():
 
         return retry_ssh()
 
-    # create_VM - Creates VM in the given network(s)
-    # create_VM - Creates VM in the given network(s)
     def create_VM(self, network_list, host_id=None, start_vm=True,
                   testdata=None, account=None, keypair=None):
         network_ids = []
@@ -1220,12 +1540,14 @@ def create_VM(self, network_list, host_id=None, start_vm=True,
                                    )
         self.debug("Created VM with ID - %s in network(s) with ID(s) - %s"
                    % (vm.id, network_ids))
+        self.cleanup.append(vm)
         return vm
 
-    # check_VM_state - Checks if the given VM is in the expected state form the
-    # list of fetched VMs
     def check_VM_state(self, vm, state=None):
-        """Validates the VM state"""
+        """Validates the VM state
+        Checks if the given VM is in the expected state
+        from the list of fetched VMs
+        """
         self.debug("Validating the deployment and state of VM - %s" % vm.name)
         vms = VirtualMachine.list(self.api_client,
                                   id=vm.id,
@@ -1241,10 +1563,11 @@ def check_VM_state(self, vm, state=None):
         self.debug("Successfully validated the deployment and state of VM - %s"
                    % vm.name)
 
-    # validate_Network - Validates the given network, matches the given network
-    # name and state against the list of networks fetched
     def validate_Network(self, network, state=None):
-        """Validates the network"""
+        """Validates the network
+           matches the given network name and state
+           against the list of networks fetched
+        """
         self.debug("Validating the creation and state of Network - %s" %
                    network.name)
         networks = Network.list(self.api_client,
@@ -1264,8 +1587,8 @@ def validate_Network(self, network, state=None):
         self.debug("Successfully validated the creation and state of Network "
                    "- %s" % network.name)
 
-    # get_Router - Returns router for the given network
     def get_Router(self, network):
+        """Returns router for the given network"""
         self.debug("Finding the virtual router for network with ID - %s" %
                    network.id)
         routers = Router.list(self.api_client,
@@ -1280,27 +1603,37 @@ def get_Router(self, network):
 
     # check_Router_state - Checks if the given router is in the expected state
     # from the list of fetched routers
-    def check_Router_state(self, router, state=None):
+    def check_Router_state(self, router=None, network=None, state=None):
         """Validates the Router state"""
-        self.debug("Validating the deployment and state of Router - %s" %
-                   router.name)
-        routers = Router.list(self.api_client,
-                              id=router.id,
-                              listall=True
-                              )
-        self.assertEqual(isinstance(routers, list), True,
-                         "List router should return a valid list"
-                         )
+
+        if router:
+            self.debug("Validating the deployment and state of Router - %s" %
+                       router.name)
+            routers = Router.list(self.api_client, id=router.id,
+                                  listall=True)
+        elif network:
+            self.debug("Validating the deployment and state of Router "
+                       "in network - %s" % network.name)
+            routers = Router.list(self.api_client, networkid=network.id,
+                                  listall=True)
+        else:
+            raise AttributeError("Either router or network "
+                                 "has to be specified")
+
+        self.assertTrue(isinstance(routers, list),
+                        "List router should return a valid list")
+        self.assertTrue(len(routers) > 0,
+                        "List routers should not return an empty list")
+
         if state:
             self.assertEqual(routers[0].state, state,
                              "Virtual router is not in the expected state"
                              )
         self.debug("Successfully validated the deployment and state of Router "
-                   "- %s" % router.name)
+                   "- %s" % routers[0].name)
 
-    # acquire_PublicIPAddress - Acquires public IP address for the given
-    # network/VPC
     def acquire_PublicIPAddress(self, network, vpc=None, account=None):
+        """Acquires public IP address for the given network/VPC"""
         if not account:
             account = self.account
         self.debug("Associating public IP for network with ID - %s in the "
@@ -1309,17 +1642,21 @@ def acquire_PublicIPAddress(self, network, vpc=None, account=None):
                                            accountid=account.name,
                                            domainid=account.domainid,
                                            zoneid=self.zone.id,
-                                           networkid=network.id
-                                           if vpc is None else None,
-                                           vpcid=vpc.id if vpc else self.vpc.id
-                                           if hasattr(self, "vpc") else None
+                                           networkid=network.id if vpc is None
+                                           else None,
+                                           vpcid=vpc.id if vpc
+                                           else self.vpc.id if self.vpc
+                                           else None
                                            )
         self.debug("Associated public IP address - %s with network with ID - "
                    "%s" % (public_ip.ipaddress.ipaddress, network.id))
+        self.cleanup.append(public_ip)
         return public_ip
 
-    # migrate_VM - Migrates VM to another host, if available
     def migrate_VM(self, vm):
+        """Migrates VM to another host, if available"""
+        self.debug("+++ Migrating one of the VMs in the created "
+                   "VPC Tier network to another host, if available...")
         self.debug("Checking if a host is available for migration...")
         hosts = Host.listForMigration(self.api_client, virtualmachineid=vm.id)
         if hosts:
@@ -1339,337 +1676,6 @@ def migrate_VM(self, vm):
             self.debug("No host available for migration. "
                        "Test requires at-least 2 hosts")
 
-    @attr(tags=["advanced", "isonw"], required_hardware="true")
-    def test_configdrive_isolated_network(self):
-        """Test Configdrive as provider for isolated Networks
-           to provide userdata and password reset functionality
-        """
-
-        # 1. When ConfigDrive is disabled as provider in zone
-        #    Verify Isolated Network creation with a network offering
-        #    which has userdata provided by ConfigDrive fails
-        # 2. When ConfigDrive is enabled as provider in zone
-        #    Create an Isolated Network with Isolated Network
-        #    offering specifying ConfigDrive as serviceProvider
-        #    for userdata.
-        #    check if it is successfully created and
-        #    is in the "Allocated" state.
-        # 3. Deploy a VM in the created Isolated network with user data,
-        #    check if the Isolated network state is changed to
-        #    "Implemented", and the VM is successfully deployed and
-        #    is in the "Running" state.
-        # 4. SSH into the deployed VM and verify its user data in the iso
-        #    (expected user data == actual user data).
-        # 5. Verify that the guest VM's password in the iso.
-        # 6. Reset VM password, and start the VM.
-        # 7. Verify that the new guest VM template is password enabled by
-        #     checking the VM's password (password != "password").
-        # 8. SSH into the VM for verifying its new password
-        #     after its password reset.
-        # 9. Verify various scenarios and check the data in configdriveIso
-        # 10. Delete all the created objects (cleanup).
-
-        self.debug("+++Testing configdrive in an Isolated network fails..."
-                   "as provider configdrive is still disabled...")
-        self.update_provider_state("Disabled")
-        create_network = self.verify_network_creation(
-            offering_name="isolated_configdrive_network_offering",
-            gateway='10.1.1.1')
-        self.assertFalse(create_network.success,
-                         'Network found success = %s, expected success =%s'
-                         % (str(create_network.success), 'False'))
-
-        self.debug("+++Test user data & password reset functionality "
-                   "using configdrive in an Isolated network")
-
-        self.update_provider_state("Enabled")
-        create_network1 = self.verify_network_creation(
-            offering=create_network.offering,
-            gateway='10.1.1.1')
-        self.assertTrue(create_network1.success,
-                        'Network found success = %s, expected success = %s'
-                        % (str(create_network1.success), 'True'))
-        self.validate_Network(create_network1.network, state="Allocated")
-        create_network2 = self.verify_network_creation(
-            offering=create_network.offering,
-            gateway='10.1.2.1')
-        self.assertTrue(create_network2.success,
-                        'Network found success = %s,expected success = %s'
-                        % (str(create_network2.success), 'True'))
-        self.validate_Network(create_network2.network, state="Allocated")
-        self.update_password_enable_in_template(True)
-
-        self.debug("+++Deploy VM in the created Isolated network "
-                   "with user data provider as configdrive")
-        self.generate_ssh_keys()
-        self.debug("keypair name %s " % self.keypair.name)
-        vm1 = self.create_guest_vm(create_network1.network,
-                                   keypair=self.keypair.name)
-
-        vr = self.get_Router(create_network1.network)
-        self.check_Router_state(vr, state="Running")
-
-        # We need to have the vm password
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-
-        public_ip_1 = self.acquire_PublicIPAddress(create_network1.network)
-        self.create_and_verify_fip_and_fw(vm1, public_ip_1,
-                                          create_network1.network)
-
-        self.verify_config_drive_content(
-            vm1, public_ip_1,
-            self.PasswordTest(vm1.password),
-            metadata=True,
-            userdata=self.test_data[
-                "virtual_machine_userdata"]["userdata"],
-            ssh_key=self.keypair)
-
-        expected_user_data1 = self.update_userdata(vm1, "helloworld vm1")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(True),
-                                         userdata=expected_user_data1)
-
-        self.generate_ssh_keys()
-        self.update_sshkeypair(vm1)
-        # After sshkey reset we need to have the vm password again
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         metadata=True,
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
-
-        self.debug("Adding a non-default nic to the VM "
-                   "making it a multi-nic VM...")
-        self.nic_operation_VM(vm1, create_network2.network,
-                              operation="add")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         metadata=True,
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-
-        expected_user_data1 = self.update_userdata(vm1,
-                                                   "hellomultinicvm1")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
-
-        self.debug("updating non-default nic as the default nic "
-                   "of the multi-nic VM and enable staticnat...")
-        self.nic_operation_VM(vm1,
-                              create_network2.network, operation="update")
-
-        public_ip_2 = \
-            self.acquire_PublicIPAddress(create_network2.network)
-        self.create_and_verify_fip_and_fw(vm1, public_ip_2,
-                                          create_network2.network)
-        vm1.stop(self.api_client)
-        vm1.start(self.api_client)
-        self.verify_config_drive_content(vm1, public_ip_2,
-                                         self.PasswordTest(False),
-                                         metadata=True,
-                                         userdata=expected_user_data1)
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-        self.verify_config_drive_content(vm1, public_ip_2,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1)
-        expected_user_data1 = self.update_userdata(vm1,
-                                                   "hellomultinicvm1")
-        self.verify_config_drive_content(vm1, public_ip_2,
-                                         self.PasswordTest(True),
-                                         userdata=expected_user_data1)
-
-        self.debug("Updating the default nic of the multi-nic VM, "
-                   "deleting the non-default nic...")
-        self.nic_operation_VM(vm1,
-                              create_network1.network, operation="update")
-        vm1.stop(self.api_client)
-        vm1.start(self.api_client)
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(False),
-                                         metadata=True,
-                                         userdata=expected_user_data1)
-
-        multinicvm1 = self.create_guest_vm([create_network2.network,
-                                            create_network1.network])
-        multinicvm1.password = multinicvm1.resetPassword(self.api_client)
-        self.debug("MultiNICVM Password reset to - %s"
-                   % multinicvm1.password)
-        self.debug("MultiNICVM - %s password - %s !"
-                   % (multinicvm1.name, multinicvm1.password))
-
-        public_ip_3 = self.acquire_PublicIPAddress(create_network2.network)
-        self.create_and_verify_fip_and_fw(multinicvm1, public_ip_3,
-                                          create_network2.network)
-        self.verify_config_drive_content(
-            multinicvm1, public_ip_3,
-            self.PasswordTest(multinicvm1.password),
-            metadata=True,
-            userdata=self.test_data[
-                "virtual_machine_userdata"]["userdata"])
-        expected_user_data2 = self.update_userdata(multinicvm1,
-                                                   "hello multinicvm1")
-        self.verify_config_drive_content(multinicvm1, public_ip_3,
-                                         self.PasswordTest(True),
-                                         userdata=expected_user_data2)
-
-        multinicvm1.delete(self.api_client, expunge=True)
-        public_ip_3.delete(self.api_client)
-        public_ip_2.delete(self.api_client)
-        self.nic_operation_VM(vm1,
-                              create_network2.network, operation="remove")
-        create_network2.network.delete(self.api_client)
-
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-
-        self.debug("+++ Restarting the created Isolated network without "
-                   "cleanup...")
-        create_network1.network.restart(self.api_client, cleanup=False)
-        self.validate_Network(create_network1.network,
-                              state="Implemented")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ Restarting the created Isolated network with "
-                   "cleanup...")
-        create_network1.network.restart(self.api_client, cleanup=True)
-        self.validate_Network(create_network1.network,
-                              state="Implemented")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++Verifying userdata after rebootVM - %s" % vm1.name)
-        vm1.reboot(self.api_client)
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         metadata=True,
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
-
-        self.debug("Updating userdata for VM - %s" % vm1.name)
-        expected_user_data1 = self.update_userdata(vm1, "hello afterboot")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
-        self.debug("Resetting password for VM - %s" % vm1.name)
-        self.reset_password(vm1)
-        self.debug("SSHing into the VM for verifying its new password "
-                   "after its password reset...")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password))
-
-        self.debug("+++ Migrating one of the VMs in the created Isolated "
-                   "network to another host, if available...")
-        self.migrate_VM(vm1)
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("Updating userdata after migrating VM - %s" % vm1.name)
-        expected_user_data1 = self.update_userdata(vm1,
-                                                   "hello after migrate")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password),
-                                         userdata=expected_user_data1)
-        self.debug("Resetting password for VM - %s" % vm1.name)
-        self.reset_password(vm1)
-        self.debug("SSHing into the VM for verifying its new password "
-                   "after its password reset...")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password))
-
-        self.debug("+++Verify userdata after stopstartVM - %s" % vm1.name)
-        vm1.stop(self.api_client)
-        vm1.start(self.api_client)
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("Updating userdata for VM - %s" % vm1.name)
-        expected_user_data1 = self.update_userdata(vm1,
-                                                   "hello afterstopstart")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data1)
-        self.debug("Resetting password for VM - %s" % vm1.name)
-        self.reset_password(vm1)
-        self.debug("SSHing into the VM for verifying its new password "
-                   "after its password reset...")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(vm1.password))
-
-        self.debug("+++ Verify userdata after VM recover- %s" % vm1.name)
-        vm1.delete(self.api_client, expunge=False)
-        self.debug("Recover VM - %s" % vm1.name)
-        vm1.recover(self.api_client)
-        vm1.start(self.api_client)
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-        self.update_provider_state("Disabled")
-        expected_user_data1 = self.update_userdata(vm1,
-                                                   "hello after recover")
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ When template is not password enabled, "
-                   "verify configdrive of VM - %s" % vm1.name)
-        vm1.delete(self.api_client, expunge=True)
-        self.update_provider_state("Enabled")
-        self.updateTemplate(False)
-        self.generate_ssh_keys()
-        self.debug("keypair name %s " % self.keypair.name)
-        vm1 = self.create_guest_vm(create_network1.network,
-                                   keypair=self.keypair.name)
-
-        expected_user_data1 = self.update_userdata(vm1,
-                                                   "This is sample data")
-        public_ip_1 = \
-            self.acquire_PublicIPAddress(create_network1.network)
-        self.create_and_verify_fip_and_fw(vm1, public_ip_1,
-                                          create_network1.network)
-        self.verify_config_drive_content(vm1, public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-        vm1.delete(self.api_client, expunge=True)
-        create_network1.network.delete(self.api_client)
-
     # create_NetworkAclList - Creates network ACL list in the given VPC
     def create_NetworkAclList(self, name, description, vpc):
         self.debug("Adding NetworkACL list in VPC with ID - %s" % vpc.id)
@@ -1709,389 +1715,422 @@ def restart_Vpc(self, vpc, cleanup=False):
         self.api_client.restartVPC(cmd)
         self.debug("Restarted VPC with ID - %s" % vpc.id)
 
-    @attr(tags=["advanced", "vpc"], required_hardware="true")
-    def test_configdrive_vpc_network(self):
-        """Test Configdrive for VPC Networks
-           choose user data with configDrive as service provider
-           and test password reset functionality using ConfigDrive
+    @attr(tags=["advanced", "isonw"], required_hardware="true")
+    def test_configdrive_isolated_network(self):
+        """Test Configdrive as provider for isolated Networks
+           to provide userdata and password reset functionality
         """
 
-        # 1. Verify VPC Network creation with ConfigDrive fails
-        #    as ConfigDrive is disabled as provider
-        # 2. Create a VPC Network with VPC tier Network
-        #    offering specifying ConfigDrive as serviceProvider for userdata.
-        #    check if it is successfully created and is in "Allocated" state.
-        # 3. Deploy a VM in the created VPC tier network with user data,
-        #    check if the Isolated network state is changed to "Implemented",
-        #    and the VM is successfully deployed and is in "Running" state.
-        # 4. SSH into the deployed VM and verify its user data in the iso
-        #    (expected user data == actual user data).
-        # 5. Verify that the guest VM's password in the iso.
-        # 6. Reset VM password, and start the VM.
-        # 7. Verify that the new guest VM template is password enabled by
-        #    checking the VM's password (password != "password").
-        # 8. SSH into the VM for verifying its new password
-        #     after its password reset.
-        # 9. Verify various scenarios and check the data in configdrive iso
+        # 1. Given ConfigDrive provider is disabled in zone
+        #    And a network offering which has
+        #      user data provided by ConfigDrive
+        #    Then creating an Isolated Network
+        #    using that network offering fails
+
+        # 2. Given ConfigDrive provider is enabled in zone
+        #    And a network offering which has
+        #    * user data provided by ConfigDrive
+        #    When I create an Isolated Network using that network offering
+        #    Then the network is successfully created,
+        #    And is in the "Allocated" state.
+
+        # 3. When I deploy a VM in the created Isolated network with user data,
+        #    Then the Isolated network state is changed to "Implemented"
+        #    And the VM is successfully deployed and is in the "Running" state
+        #    And no VR is deployed.
+        # 4. And the user data in the ConfigDrive device is as expected
+        #    And the VM password in the ConfigDrive device is as expected
+
+        # 6. When I stop, reset the password, and start the VM
+        # 7. Then I can log in to the VM using the new password.
+        # 8. And the VM password in the ConfigDrive device is the new one
+
+        # 9. Verify various scenarios and check the data in configdriveIso
         # 10. Delete all the created objects (cleanup).
-        self.update_provider_state("Enabled")
-        create_vpc = self.verify_vpc_creation(
-            offering_name="vpc_offering_configdrive")
-        self.assertTrue(create_vpc.success,
-                        "Vpc found success = %s, expected success = %s"
-                        % (str(create_vpc.success), 'True'))
-        acl_list = self.create_NetworkAclList(
-            name="acl", description="acl", vpc=create_vpc.vpc)
-        acl_item = self.create_NetworkAclRule(
-            self.test_data["ingress_rule"], acl_list=acl_list)
-        self.update_provider_state("Disabled")
-        self.debug("+++Testing configdrive in a VPC Tier network fails..."
-                   "as provider configdrive is still disabled...")
-        create_networkfails = \
-            self.verify_network_creation(
-                offering_name="vpc_network_offering_configdrive",
-                gateway='10.1.1.1',
-                vpc=create_vpc.vpc,
-                acl_list=acl_list)
-        self.assertFalse(create_networkfails.success,
-                         "Create Network found success = %s, "
-                         "expected success = %s"
-                         % (str(create_networkfails.success), 'False'))
-        self.debug("Testing user data&password reset functionality using"
-                   "configdrive in a VPC network...")
-        self.update_provider_state("Enabled")
-
-        create_tiernetwork = \
-            self.verify_network_creation(
-                offering=create_networkfails.offering,
-                gateway='10.1.1.1',
-                vpc=create_vpc.vpc,
-                acl_list=acl_list)
-        self.assertTrue(create_tiernetwork.success,
-                        "Create Network found success = %s, "
-                        "expected success = %s"
-                        % (str(create_tiernetwork.success), 'True'))
-        self.validate_Network(create_tiernetwork.network,
-                              state="Implemented")
-
-        vpc_vr = self.get_Router(create_tiernetwork.network)
-        self.check_Router_state(vpc_vr, state="Running")
-
-        create_tiernetwork2 = \
-            self.verify_network_creation(
-                offering=create_networkfails.offering,
-                gateway='10.1.2.1',
-                vpc=create_vpc.vpc,
-                acl_list=acl_list)
-        self.assertTrue(create_tiernetwork2.success,
-                        'Network found success= %s, expected success= %s'
-                        % (str(create_tiernetwork2.success), 'True'))
-        self.validate_Network(create_tiernetwork2.network,
-                              state="Implemented")
-
-        vpc_vr2 = self.get_Router(create_tiernetwork2.network)
-        self.check_Router_state(vpc_vr2, state="Running")
-
-        self.update_password_enable_in_template(True)
 
-        self.generate_ssh_keys()
-        self.debug("keypair name %s " % self.keypair.name)
-        vm = self.create_guest_vm(create_tiernetwork.network,
-                                  acl_item,
-                                  vpc=create_vpc.vpc,
-                                  keypair=self.keypair.name)
-
-        vpc_public_ip_1 = \
-            self.acquire_PublicIPAddress(create_tiernetwork.network,
-                                         create_vpc.vpc)
-        self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1,
-                                         create_tiernetwork.network)
-
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(True),
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        expected_user_data = self.update_userdata(vm, "helloworld vm1")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(True),
-                                         metadata=True,
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
+        self.debug("+++ Scenario: creating an Isolated network with "
+                   "config drive fails when config drive provider is "
+                   "disabled.")
+        self.given_config_drive_provider_is("Disabled")
+        self.given_a_network_offering_with_configdrive()
+        self.then_creating_a_network_with_that_offering_fails()
 
-        self.debug("Resetting password for VM - %s" % vm.name)
-        self.reset_password(vm)
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
+        self.debug("+++ Preparation Scenario: "
+                   "creating an Isolated networks with "
+                   "config drive when config drive provider is "
+                   "enabled.")
 
-        self.generate_ssh_keys()
-        self.update_sshkeypair(vm)
-        # After sshkey reset we need to have the vm password again
-        vm.password = vm.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm.name, vm.password))
+        self.given_config_drive_provider_is("Enabled")
+
+        create_network1 = self.when_I_create_a_network_with_that_offering()
+        self.then_the_network_is_successfully_created(create_network1)
+        self.then_the_network_has(create_network1, state="Allocated")
+
+        create_network2 = self.when_I_create_a_network_with_that_offering()
+        self.then_the_network_is_successfully_created(create_network2)
+        self.then_the_network_has(create_network2, state="Allocated")
+
+        network1 = create_network1.network
+        network2 = create_network2.network
+
+        self.given_template_password_enabled_is(True)
+
+        self.debug("+++Deploy VM in the created Isolated network "
+                   "with user data provider as configdrive")
 
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         metadata=True,
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ Restarting the created vpc without "
-                   "cleanup...")
-        self.restart_Vpc(create_vpc.vpc, cleanup=False)
-        self.validate_Vpc(create_vpc.vpc, state="Enabled")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
+        vm1 = self.when_I_deploy_a_vm_with_keypair_in(network1)
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm1, network1)
+
+        self.then_vr_is_as_expected(network1)
+        self.then_config_drive_is_as_expected(
+            vm1, public_ip_1,
+            metadata=True)
+
+        self.update_and_validate_userdata(vm1, "helloworld vm1", public_ip_1)
+        self.update_and_validate_sshkeypair(vm1, public_ip_1)
+
+        # =====================================================================
 
         self.debug("Adding a non-default nic to the VM "
                    "making it a multi-nic VM...")
-        self.nic_operation_VM(vm, create_tiernetwork2.network,
-                              operation="add")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         metadata=True,
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
-
-        vm.password = vm.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm.name, vm.password))
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         metadata=True,
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
-        expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
+        self.plug_nic(vm1, network2)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        with self.stopped_vm(vm1):
+            self.when_I_reset_the_password(vm1)
+            self.when_I_update_userdata(vm1, "hellomultinicvm1")
+
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        # Test using network2 as default network
+        # =====================================================================
 
         self.debug("updating non-default nic as the default nic "
                    "of the multi-nic VM and enable staticnat...")
-        self.nic_operation_VM(vm,
-                              create_tiernetwork2.network,
-                              operation="update")
-        vm.stop(self.api_client)
-        vm.start(self.api_client)
-        vpc_public_ip_2 = \
-            self.acquire_PublicIPAddress(create_tiernetwork2.network,
-                                         create_vpc.vpc)
-        self.create_StaticNatRule_For_VM(vm, vpc_public_ip_2,
-                                     create_tiernetwork2.network)
-
-        self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                         self.PasswordTest(vm.password),
-                                         metadata=True,
-                                         userdata=expected_user_data1)
-        vm.password = vm.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm.name, vm.password))
-        self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1)
-        expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1")
-        self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                         self.PasswordTest(True),
-                                         userdata=expected_user_data1)
+        self.update_default_nic(vm1, network2)
+
+        public_ip_2 = \
+            self.when_I_create_a_static_nat_ip_to(vm1, network2)
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_2, metadata=True)
+
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_2)
+
+        user_data = "hellomultinicvm1again"
+        self.update_and_validate_userdata(vm1, user_data, public_ip_2)
 
         self.debug("Updating the default nic of the multi-nic VM, "
                    "deleting the non-default nic...")
-        self.nic_operation_VM(vm,
-                              create_tiernetwork.network,
-                              operation="update")
-        self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                         self.PasswordTest(True),
-                                         metadata=True,
-                                         userdata=expected_user_data1)
-        vpc_public_ip_2.delete(self.api_client)
-        self.nic_operation_VM(vm,
-                              create_tiernetwork2.network,
-                              operation="remove")
-        create_tiernetwork2.network.delete(self.api_client)
-
-        vm.password = vm.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm.name, vm.password))
+        self.update_default_nic(vm1, network1)
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+
+        self.delete(public_ip_2)
+        self.unplug_nic(vm1, network2)
+
+        # =====================================================================
+        # Another Multinic VM
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Reset password and update userdata on a multi nic vm")
+        multinicvm1 = self.when_I_deploy_a_vm([network2, network1])
+        self.when_I_reset_the_password(multinicvm1)
+        public_ip_3 = self.when_I_create_a_static_nat_ip_to(multinicvm1,
+                                                            network2)
+        self.then_config_drive_is_as_expected(
+            multinicvm1, public_ip_3,
+            metadata=True)
+
+        user_data2 = "hello multinicvm1"
+        self.update_and_validate_userdata(multinicvm1, user_data2, public_ip_3)
+
+        self.delete(multinicvm1, expunge=True)
+        self.delete(public_ip_3)
+        self.delete(network2)
+
+        # =====================================================================
+        # Network restart tests
+        # =====================================================================
+
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart Isolated network without"
+                   " cleanup...")
+        self.when_I_reset_the_password(vm1)
+        self.when_I_restart_the_network_with(network1, cleanup=False)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart Isolated network with"
+                   " cleanup...")
+        self.when_I_restart_the_network_with(network1, cleanup=True)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+
+        # =====================================================================
+        # Nuage --
+        #   Update offering to VR
+        # =====================================================================
+
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after reboot")
+        vm1.reboot(self.api_client)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm1, "hello afterboot", public_ip_1)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after migrate")
+        self.migrate_VM(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+        self.debug("Updating userdata after migrating VM - %s" % vm1.name)
+        self.update_and_validate_userdata(vm1, "hello after migrate",
+                                          public_ip_1)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after stop/start")
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm1, "hello afterstopstart",
+                                          public_ip_1)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after delete/recover")
+        self.delete_and_recover_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1,
+                                              metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Start VM fails when ConfigDrive provider is disabled")
+        self.given_config_drive_provider_is("Disabled")
+        with self.assertRaises(Exception):
+            self.when_I_update_userdata(vm1, "hi with provider state Disabled")
+        self.given_config_drive_provider_is("Enabled")
+
+        self.delete(vm1, expunge=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Update Userdata on a VM that is not password enabled")
+        self.update_template(passwordenabled=False)
+        vm1 = self.when_I_deploy_a_vm_with_keypair_in(network1)
+
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm1, network1)
+
+        self.update_and_validate_userdata(vm1,
+                                          "This is sample data",
+                                          public_ip_1,
+                                          metadata=True)
+
+        self.delete(vm1, expunge=True)
+        self.delete(network1)
+
+    @attr(tags=["advanced", "vpc"], required_hardware="true")
+    def test_configdrive_vpc_network(self):
+        """Test Configdrive for VPC Networks
+           choose user data with configDrive as service provider
+           and test password reset functionality using ConfigDrive
+        """
+
+        # 1. Given ConfigDrive provider is disabled in zone
+        #    And a network offering for VPC which has
+        #      user data provided by ConfigDrive
+        #    And a VPC
+        #    Then creating a VPC tier in the VPC
+        #    using that network offering fails
+
+        # 2. Given ConfigDrive provider is enabled in zone
+        #    And a network offering for VPC which has
+        #      user data provided by ConfigDrive
+        #    And a VPC
+        #    When I create a VPC tier in the VPC using that network offering
+        #    Then the network is successfully created,
+        #    And is in the "Allocated" state.
+
+        # 3. When I deploy a VM in the created VPC tier with user data,
+        #    Then the network state is changed to "Implemented"
+        #    And the VM is successfully deployed and is in the "Running" state
+
+        # 4. And the user data in the ConfigDrive device is as expected
+        # 5. And the vm password in the ConfigDrive device is as expected
+
+        # 6. When I stop, reset the password, and start the VM
+        # 7. Then I can login into the VM using the new password.
+        # 8. And the vm password in the ConfigDrive device is the new one
+
+        # 9. Verify various scenarios and check the data in configdriveIso
+        # 10. Delete all the created objects (cleanup).
+
+        self.debug("+++ Scenario: creating an VPC tier with "
+                   "config drive fails when config drive provider is "
+                   "disabled.")
+        self.given_a_vpc()
+        self.given_config_drive_provider_is("Disabled")
+        self.given_a_network_offering_for_vpc_with_configdrive()
+        self.then_creating_a_vpc_tier_with_that_offering_fails()
+
+        self.debug("+++ Preparation Scenario: "
+                   "Create 2 tier with config drive "
+                   "when config drive provider is enabled.")
+
+        self.given_config_drive_provider_is("Enabled")
 
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ Restarting the created vpc with "
-                   "cleanup...")
-        self.restart_Vpc(create_vpc.vpc, cleanup=True)
-        self.validate_Vpc(create_vpc.vpc, state="Enabled")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ Restarting the created VPC Tier network without "
-                   "cleanup...")
-        create_tiernetwork.network.restart(self.api_client, cleanup=False)
-        self.validate_Network(create_tiernetwork.network,
-                              state="Implemented")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ Restarting the created VPC Tier network with "
-                   "cleanup...")
-        create_tiernetwork.network.restart(self.api_client, cleanup=True)
-        self.validate_Network(create_tiernetwork.network,
-                              state="Implemented")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ Restarting the created vpc without "
-                   "cleanup...")
-        self.restart_Vpc(create_vpc.vpc, cleanup=False)
-        self.validate_Vpc(create_vpc.vpc, state="Enabled")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True)
-
-        self.debug("+++ Restarting the created vpc with "
-                   "cleanup...")
-        self.restart_Vpc(create_vpc.vpc, cleanup=True)
-        self.validate_Vpc(create_vpc.vpc, state="Enabled")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data1,
-                                         metadata=True)
-
-        self.debug("+++ Verify userdata after rebootVM - %s" % vm.name)
+        create_network1 = self.when_I_create_a_vpc_tier_with_that_offering(
+            gateway='10.1.1.1')
+        self.then_the_network_is_successfully_created(create_network1)
+        self.then_the_network_has(create_network1, state="Implemented")
+
+        create_network2 = self.when_I_create_a_vpc_tier_with_that_offering(
+            gateway='10.1.2.1')
+        self.then_the_network_is_successfully_created(create_network2)
+        self.then_the_network_has(create_network2, state="Implemented")
+
+        network1 = create_network1.network
+        network2 = create_network2.network
+
+        self.given_template_password_enabled_is(True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Deploy VM in the Tier 1 with user data")
+        vm = self.when_I_deploy_a_vm(network1,
+                                     keypair=self.keypair.name)
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm, network1)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        self.update_and_validate_userdata(vm, "helloworld vm1",
+                                          public_ip_1,
+                                          metadata=True)
+
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
+
+        self.update_and_validate_sshkeypair(vm, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Restarting the created vpc without cleanup...")
+        self.restart_Vpc(self.vpc, cleanup=False)
+        self.validate_vpc(self.vpc, state="Enabled")
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        # =====================================================================
+        self.debug("Adding a non-default nic to the VM "
+                   "making it a multi-nic VM...")
+        self.plug_nic(vm, network2)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        with self.stopped_vm(vm):
+            self.when_I_reset_the_password(vm)
+            self.when_I_update_userdata(vm, "hellomultinicvm1")
+
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        self.unplug_nic(vm, network2)
+        self.delete(network2)
+
+        # =====================================================================
+        # Network restart tests
+        # =====================================================================
+
+        self.debug("+++ Scenario: "
+                   "verify config drive after Restart VPC with cleanup...")
+        self.when_I_restart_the_vpc_with(cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after Restart VPC without cleanup...")
+        self.when_I_restart_the_network_with(network1, cleanup=False)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart tier with cleanup...")
+        self.when_I_restart_the_network_with(network1, cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after reboot")
         vm.reboot(self.api_client)
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         metadata=True,
-                                         userdata=expected_user_data1,
-                                         ssh_key=self.keypair)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm, "hello reboot", public_ip_1)
 
-        self.debug("Updating userdata for VM - %s" % vm.name)
-        expected_user_data = self.update_userdata(vm,
-                                                  "hellovm after reboot")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
-        self.debug("Resetting password for VM - %s" % vm.name)
-        self.reset_password(vm)
-        self.debug("SSHing into the VM for verifying its new password "
-                   "after its password reset...")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password))
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
 
-        self.debug("+++ Migrating one of the VMs in the created "
-                   "VPC Tier network to another host, if available...")
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after migrate")
         self.migrate_VM(vm)
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("Updating userdata after migrating VM - %s" % vm.name)
-        expected_user_data = self.update_userdata(vm,
-                                                  "hellovm after migrate")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password),
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
-        self.debug("Resetting password for VM - %s" % vm.name)
-        self.reset_password(vm)
-        self.debug("SSHing into the VM for verifying its new password "
-                   "after its password reset...")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password))
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm, "hello migrate", public_ip_1)
 
-        self.debug("+++ Verify userdata after stopstartVM - %s" % vm.name)
-        vm.stop(self.api_client)
-        vm.start(self.api_client)
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
 
-        self.debug("Updating userdata for VM - %s" % vm.name)
-        expected_user_data = self.update_userdata(vm,
-                                                  "hello after stopstart")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data,
-                                         ssh_key=self.keypair)
-        self.debug("Resetting password for VM - %s" % vm.name)
-        self.reset_password(vm)
-        self.debug("SSHing into the VM for verifying its new password "
-                   "after its password reset...")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(vm.password))
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after stop/start")
+        self.stop_and_start_vm(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
 
-        self.debug("+++ Verify userdata after recoverVM - %s" % vm.name)
-        vm.delete(self.api_client, expunge=False)
-        self.debug("Recover VM - %s" % vm.name)
-        vm.recover(self.api_client)
-        vm.start(self.api_client)
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-        self.update_provider_state("Disabled")
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-
-        self.debug("+++ When template is not password enabled "
-                   "verify configdrive of VM - %s" % vm.name)
-        vm.delete(self.api_client, expunge=True)
-        self.update_provider_state("Enabled")
-        self.updateTemplate(False)
+        self.update_and_validate_userdata(vm, "hello stop/start", public_ip_1)
 
-        self.generate_ssh_keys()
-        self.debug("keypair name %s " % self.keypair.name)
-        vm = self.create_guest_vm(create_tiernetwork.network,
-                                  acl_item,
-                                  vpc=create_vpc.vpc,
-                                  keypair=self.keypair.name)
-
-        expected_user_data = self.update_userdata(vm,
-                                                  "This is sample data")
-        vpc_public_ip_1 = \
-            self.acquire_PublicIPAddress(create_tiernetwork.network,
-                                         create_vpc.vpc)
-        self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1,
-                                         create_tiernetwork.network)
-        self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                         self.PasswordTest(False),
-                                         userdata=expected_user_data,
-                                         metadata=True,
-                                         ssh_key=self.keypair)
-        vm.delete(self.api_client, expunge=True)
-        create_tiernetwork.network.delete(self.api_client)
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after delete/recover")
+        self.delete_and_recover_vm(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Verify configdrive when template is not password enabled")
+        self.given_config_drive_provider_is("Disabled")
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+        self.given_config_drive_provider_is("Enabled")
+
+        self.delete(vm, expunge=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Update Userdata on a VM that is not password enabled")
+
+        self.update_template(passwordenabled=False)
+
+        vm = self.when_I_deploy_a_vm(network1,
+                                     keypair=self.keypair.name)
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm, network1)
+        self.update_and_validate_userdata(vm, "This is sample data",
+                                          public_ip_1,
+                                          metadata=True)
+
+        self.delete(vm, expunge=True)
+        self.delete(network1)
 
     @attr(tags=["advanced", "shared"], required_hardware="true")
     def test_configdrive_shared_network(self):
@@ -2121,7 +2160,7 @@ def test_configdrive_shared_network(self):
 
         self.debug("+++Testing configdrive in an shared network fails..."
                    "as provider configdrive is still disabled...")
-        self.update_provider_state("Disabled")
+        self.given_config_drive_provider_is("Disabled")
         shared_test_data = self.test_data["acl"]["network_all_1"]
         shared_network = self.verify_network_creation(
             offering_name="shared_network_config_drive_offering",
@@ -2130,7 +2169,7 @@ def test_configdrive_shared_network(self):
                          'Network found success = %s, expected success =%s'
                          % (str(shared_network.success), 'False'))
 
-        self.update_provider_state("Enabled")
+        self.given_config_drive_provider_is("Enabled")
         shared_network = self.verify_network_creation(
             offering=shared_network.offering, testdata=shared_test_data)
         self.assertTrue(shared_network.success,
@@ -2152,7 +2191,7 @@ def test_configdrive_shared_network(self):
         self.debug("+++Test user data & password reset functionality "
                    "using configdrive in an Isolated network")
 
-        self.update_password_enable_in_template(True)
+        self.given_template_password_enabled_is(True)
 
         self.generate_ssh_keys()
         self.debug("keypair name %s " % self.keypair.name)
@@ -2163,9 +2202,9 @@ def test_configdrive_shared_network(self):
         # Add subnet of different gateway
         self.debug("+++ Adding subnet of different gateway")
 
-        self.add_subnet_verify(
-            shared_network.network,
-            self.test_data["publiciprange2"])
+        self.add_subnet_to_shared_network_and_verify(shared_network.network,
+                                                     self.test_data[
+                                                         "publiciprange2"])
         self.test_data["virtual_machine"]["ipaddress"] = \
             self.test_data["acl"]["network_all_1"]["endip"]
 
@@ -2183,65 +2222,43 @@ def test_configdrive_shared_network(self):
             keypair=self.keypair.name)
         # Check VM
         self.check_VM_state(vm1, state="Running")
+        self.check_Router_state(network=shared_network.network,
+                                state="Running")
 
-        shared_vr = self.get_Router(shared_network.network)
-        self.check_Router_state(shared_vr, state="Running")
-
-        # We need to have the vm password
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-        self.update_userdata(vm1, "helloworld vm1")
+        self.when_I_update_userdata(vm1, "helloworld vm1")
 
         self.debug("Adding a non-default nic to the VM "
                    "making it a multi-nic VM...")
-        self.nic_operation_VM(vm1, shared_network2.network,
-                              operation="add")
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
+        self.plug_nic(vm1, shared_network2.network)
+        self.when_I_reset_the_password(vm1)
 
         self.debug("updating non-default nic as the default nic "
                    "of the multi-nic VM...")
-        self.nic_operation_VM(vm1,
-                              shared_network2.network, operation="update")
-        vm1.stop(self.api_client)
-        vm1.start(self.api_client)
+        self.update_default_nic(vm1, shared_network2.network)
+        self.when_I_reset_the_password(vm1)
 
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
-        self.update_userdata(vm1, "hellomultinicvm1")
+        self.when_I_update_userdata(vm1, "hellomultinicvm1")
 
         self.debug("Updating the default nic of the multi-nic VM, "
                    "deleting the non-default nic...")
-        self.nic_operation_VM(vm1,
-                              shared_network.network, operation="update")
+        self.update_default_nic(vm1, shared_network.network)
         vm1.stop(self.api_client)
         vm1.start(self.api_client)
 
-        self.nic_operation_VM(vm1,
-                              shared_network2.network, operation="remove")
-        shared_network2.network.delete(self.api_client)
-        # We need to have the vm password
-        vm1.password = vm1.resetPassword(self.api_client)
-        self.debug("Password reset to - %s" % vm1.password)
-        self.debug("VM - %s password - %s !" %
-                   (vm1.name, vm1.password))
+        self.unplug_nic(vm1,
+                        shared_network2.network)
+        self.delete(shared_network2.network)
 
         self.debug("+++ When template is not password enabled, "
                    "verify configdrive of VM - %s" % vm1.name)
-        vm1.delete(self.api_client, expunge=True)
-        self.update_provider_state("Enabled")
-        self.updateTemplate(False)
-        self.generate_ssh_keys()
-        self.debug("keypair name %s " % self.keypair.name)
+        self.delete(vm1, expunge=True)
+
+        self.given_config_drive_provider_is("Enabled")
+        self.update_template(passwordenabled=False)
+
         vm1 = self.create_VM(
             [shared_network.network],
             testdata=self.test_data["virtual_machine_userdata"],
             keypair=self.keypair.name)
-        vm1.delete(self.api_client, expunge=True)
-        shared_network.network.delete(self.api_client)
+        self.delete(vm1, expunge=True)
+        self.delete(shared_network.network)
diff --git a/test/integration/plugins/nuagevsp/nuageTestCase.py b/test/integration/plugins/nuagevsp/nuageTestCase.py
index 9d842aa3a93..7c9d424caaa 100644
--- a/test/integration/plugins/nuagevsp/nuageTestCase.py
+++ b/test/integration/plugins/nuagevsp/nuageTestCase.py
@@ -17,8 +17,19 @@
 
 """ Custom base class for Nuage VSP SDN plugin specific Marvin tests
 """
+import functools
+import importlib
+import logging
+import socket
+
+import sys
+import time
 # Import Local Modules
 from bambou.nurest_object import NURESTObject
+from marvin.cloudstackAPI import (restartVPC,
+                                  enableNuageUnderlayVlanIpRange,
+                                  disableNuageUnderlayVlanIpRange,
+                                  listNuageUnderlayVlanIpRanges)
 from marvin.cloudstackTestCase import cloudstackTestCase, unittest
 from marvin.lib.base import (Domain,
                              EgressFireWallRule,
@@ -44,22 +55,10 @@
 from marvin.lib.common import (get_domain,
                                get_template,
                                get_zone)
-from marvin.cloudstackAPI import (restartVPC,
-                                  enableNuageUnderlayVlanIpRange,
-                                  disableNuageUnderlayVlanIpRange,
-                                  listNuageUnderlayVlanIpRanges)
-
-from nuage_test_data import nuage_test_data
-from nuage_vsp_statistics import VsdDataCollector
-
 # Import System Modules
 from retry import retry
-import importlib
-import functools
-import logging
-import socket
-import time
-import sys
+
+from nuage_test_data import nuage_test_data
 
 
 class needscleanup(object):
@@ -89,38 +88,6 @@ def _wrapper(*args, **kwargs):
         return _wrapper
 
 
-class gherkin(object):
-    """Decorator to mark a method as Gherkin style.
-       Add extra colored logging
-    """
-    BLACK = "\033[0;30m"
-    BLUE = "\033[0;34m"
-    GREEN = "\033[0;32m"
-    CYAN = "\033[0;36m"
-    RED = "\033[0;31m"
-    BOLDBLUE = "\033[1;34m"
-    NORMAL = "\033[0m"
-
-    def __init__(self, method):
-        self.method = method
-
-    def __get__(self, obj=None, objtype=None):
-        @functools.wraps(self.method)
-        def _wrapper(*args, **kwargs):
-            gherkin_step = self.method.__name__.replace("_", " ").capitalize()
-            obj.info("=G= %s%s%s" % (self.BOLDBLUE, gherkin_step, self.NORMAL))
-            try:
-                result = self.method(obj, *args, **kwargs)
-                obj.info("=G= %s%s: [SUCCESS]%s" %
-                         (self.GREEN, gherkin_step, self.NORMAL))
-                return result
-            except Exception as e:
-                obj.info("=G= %s%s: [FAILED]%s%s" %
-                         (self.RED, gherkin_step, self.NORMAL, e))
-                raise
-        return _wrapper
-
-
 class nuageTestCase(cloudstackTestCase):
 
     @classmethod
@@ -291,7 +258,8 @@ def configureVSDSessions(cls):
             address=cls.nuage_vsp_device.hostname,
             user=cls.nuage_vsp_device.username,
             password=cls.nuage_vsp_device.password,
-            version=cls.nuage_vsp_device.apiversion[1] + "." + cls.nuage_vsp_device.apiversion[3]
+            version=cls.nuage_vsp_device.apiversion[1] +
+            "." + cls.nuage_vsp_device.apiversion[3]
         )
         vsd_api_client.new_session()
         cls.vsd = VSDHelpers(vsd_api_client)
@@ -391,7 +359,7 @@ def create_VpcOffering(cls, vpc_offering, suffix=None):
 
     # create_Vpc - Creates VPC with the given VPC offering
     @needscleanup
-    def create_Vpc(cls, vpc_offering, cidr='10.1.0.0/16', testdata=None,
+    def create_vpc(cls, vpc_offering, cidr='10.1.0.0/16', testdata=None,
                    account=None, networkDomain=None):
         """Creates VPC with the given VPC offering
         :param vpc_offering: vpc offering
@@ -466,7 +434,8 @@ def create_NetworkOffering(cls, net_offering, suffix=None,
     @needscleanup
     def create_Network(cls, nw_off, gateway="10.1.1.1",
                        netmask="255.255.255.0", vpc=None, acl_list=None,
-                       testdata=None, account=None, vlan=None, externalid=None):
+                       testdata=None, account=None, vlan=None,
+                       externalid=None):
         """Creates Network with the given Network offering
         :param nw_off: Network offering
         :type nw_off: NetworkOffering
@@ -502,7 +471,7 @@ def create_Network(cls, nw_off, gateway="10.1.1.1",
                                  vlan=vlan,
                                  externalid=externalid,
                                  vpcid=vpc.id if vpc else cls.vpc.id
-                                 if hasattr(cls, "vpc") else None,
+                                 if hasattr(cls, "vpc") and cls.vpc else None,
                                  aclid=acl_list.id if acl_list else None
                                  )
         cls.debug("Created network with ID - %s" % network.id)
@@ -513,12 +482,14 @@ def upgrade_Network(self, nw_off, network, forced=True):
         if not hasattr(nw_off, "id"):
             nw_off = self.create_NetworkOffering(nw_off)
         self.debug("Updating Network with ID - %s" % network.id)
-        network.update(self.api_client,
-                       networkofferingid=nw_off.id,
-                       changecidr=False,
-                       forced=forced
-                       )
+        updated_network =\
+            network.update(self.api_client,
+                           networkofferingid=nw_off.id,
+                           changecidr=False,
+                           forced=forced
+                           )
         self.debug("Updated network with ID - %s" % network.id)
+        return updated_network
 
     # delete_Network - Deletes the given network
     def delete_Network(self, network):
@@ -640,7 +611,8 @@ def acquire_PublicIPAddress(self, network, vpc=None, account=None):
                                            networkid=network.id
                                            if vpc is None else None,
                                            vpcid=vpc.id if vpc else self.vpc.id
-                                           if hasattr(self, "vpc") else None
+                                           if hasattr(self, "vpc") and self.vpc
+                                           else None
                                            )
         self.debug("Associated public IP address - %s with network with ID - "
                    "%s" % (public_ip.ipaddress.ipaddress, network.id))
@@ -733,7 +705,8 @@ def create_NetworkAclRule(self, rule, traffic_type="Ingress", network=None,
                                      traffictype=traffic_type
                                      )
 
-    def ssh_into_VM(self, vm, public_ip, reconnect=True, negative_test=False, keypair=None):
+    def ssh_into_VM(self, vm, public_ip, reconnect=True, negative_test=False,
+                    keypair=None):
         """Creates a SSH connection to the VM
 
         :returns: the SSH connection
@@ -753,7 +726,8 @@ def retry_ssh():
                 ipaddress=public_ip.ipaddress.ipaddress,
                 reconnect=reconnect,
                 retries=3 if negative_test else 30,
-                keyPairFileLocation=keypair.private_key_file if keypair else None
+                keyPairFileLocation=keypair.private_key_file
+                if keypair else None
             )
             self.debug("Successful to SSH into VM with ID - %s on "
                        "public IP address - %s" %
@@ -781,7 +755,6 @@ def execute_cmd(self, ssh_client, cmd):
             self.debug("SSH client executed command result is None")
         return ret_data
 
-
     def wget_from_server(self, public_ip, port=80, file_name="index.html",
                          disable_system_proxies=True):
         """Fetches file with the given file name from a web server
@@ -825,10 +798,9 @@ def validate_NetworkServiceProvider(self, provider_name, state=None):
             name=provider_name,
             physicalnetworkid=self.vsp_physical_network.id
         )
-        self.assertEqual(isinstance(providers, list), True,
-                         "List Network Service Provider should return a "
-                         "valid list"
-                         )
+        self.assertIsInstance(providers, list,
+                              "List Network Service Provider should return a "
+                              "valid list")
         self.assertEqual(provider_name, providers[0].name,
                          "Name of the Network Service Provider should match "
                          "with the returned list data"
@@ -873,7 +845,7 @@ def validate_VpcOffering(self, vpc_offering, state=None):
         self.debug("Successfully validated the creation and state of VPC "
                    "offering - %s" % vpc_offering.name)
 
-    def validate_Vpc(self, vpc, state=None):
+    def validate_vpc(self, vpc, state=None):
         """Validates the VPC
 
         Fetches the vpc by id,
@@ -989,7 +961,7 @@ def check_VM_state(self, vm, state=None):
         self.debug("Successfully validated the deployment and state of VM - %s"
                    % vm.name)
 
-    def check_Router_state(self, router, state=None):
+    def check_Router_state(self, router=None, network=None, state=None):
         """Validates the Router state
             :param router: cs object
             :type router: Router
@@ -997,12 +969,17 @@ def check_Router_state(self, router, state=None):
             :raise AssertionError when router isn't found,
                 or has an incorrect state."""
 
-        self.debug("Validating the deployment and state of Router - %s" %
-                   router.name)
-        routers = Router.list(self.api_client,
-                              id=router.id,
-                              listall=True
-                              )
+        if router:
+            self.debug("Validating the deployment and state of Router - %s" %
+                       router.name)
+            routers = Router.list(self.api_client, id=router.id,
+                                  listall=True)
+        elif network:
+            routers = Router.list(self.api_client, networkid=network.id,
+                                  listall=True)
+        else:
+            raise AttributeError("Either router or network "
+                                 "has to be specified")
         self.assertEqual(isinstance(routers, list), True,
                          "List router should return a valid list"
                          )
@@ -1011,7 +988,9 @@ def check_Router_state(self, router, state=None):
                              "Virtual router is not in the expected state"
                              )
         self.debug("Successfully validated the deployment and state of Router "
-                   "- %s" % router.name)
+                   "- %s" % routers[0].name)
+
+        return routers[0]
 
     def validate_PublicIPAddress(self, public_ip, network, static_nat=False,
                                  vm=None):
@@ -1245,7 +1224,6 @@ class NULL_NAMESPACE:
             self.debug("Failed to get the subnet id due to %s" % e)
             self.fail("Unable to get the subnet id, failing the test case")
 
-
     def verify_vsd_shared_network(self, domain_id, network,
                                   gateway="10.1.1.1"):
         """Verifies the given CloudStack domain and
@@ -1324,6 +1302,8 @@ def verify_vsd_vm(self, vm, stopped=False):
                             )
         vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0]
         for nic in vm_info.nic:
+            if nic.type == "Shared":
+                continue
             vsd_subnet = self.vsd.get_subnet(
                 filter=self.get_externalID_filter(nic.networkid))
             vsd_vport = self.vsd.get_vport(
diff --git a/test/integration/plugins/nuagevsp/nuage_lib.py b/test/integration/plugins/nuagevsp/nuage_lib.py
index fc14d297518..d248b207390 100644
--- a/test/integration/plugins/nuagevsp/nuage_lib.py
+++ b/test/integration/plugins/nuagevsp/nuage_lib.py
@@ -14,6 +14,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import functools
 
 from marvin.cloudstackAPI import createSSHKeyPair, deleteSSHKeyPair
 
@@ -44,4 +45,54 @@ def delete(self, apiclient):
         cmd.name = self.name
         cmd.account = self.account
         cmd.domainid = self.domainid
-        apiclient.deleteSSHKeyPair(cmd)
\ No newline at end of file
+        apiclient.deleteSSHKeyPair(cmd)
+
+
+class GherkinMetaClass(type):
+    def __new__(mcs, name, bases, namespace):
+        namespace = {
+            k: gherkin(v)
+            if k.startswith('given_') or
+            k.startswith('when_') or
+            k.startswith('then_')
+            else v for k, v in namespace.items()
+        }
+        return super(GherkinMetaClass, mcs)\
+            .__new__(mcs, name, bases, namespace)
+
+
+class gherkin(object):
+    """Decorator to mark a method as Gherkin style.
+       Add extra colored logging
+    """
+    BLACK = "\033[0;30m"
+    BLUE = "\033[0;34m"
+    GREEN = "\033[0;32m"
+    CYAN = "\033[0;36m"
+    RED = "\033[0;31m"
+    BOLDBLUE = "\033[1;34m"
+    NORMAL = "\033[0m"
+
+    def __init__(self, method):
+        self.method = method
+
+    def __get__(self, obj=None, objtype=None):
+        @functools.wraps(self.method)
+        def _wrapper(*args, **kwargs):
+            if self.method.__doc__:
+                gherkin_step = self.method.__doc__.format(*args, **kwargs)
+            else:
+                gherkin_step = self.method.__name__\
+                    .replace("_", " ")\
+                    .capitalize()
+            obj.info("=G= %s%s%s" % (self.BOLDBLUE, gherkin_step, self.NORMAL))
+            try:
+                result = self.method(obj, *args, **kwargs)
+                obj.info("=G= %s%s: [SUCCESS]%s" %
+                         (self.GREEN, gherkin_step, self.NORMAL))
+                return result
+            except Exception as e:
+                obj.info("=G= %s%s: [FAILED]%s%s" %
+                         (self.RED, gherkin_step, self.NORMAL, e))
+                raise
+        return _wrapper
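
As a quick aside on how these pieces fit together: the GherkinMetaClass added above wraps every method whose name starts with given_, when_ or then_ in the gherkin decorator, so each call logs a coloured "=G= <step>" line plus a SUCCESS/FAILED outcome. The short sketch below only illustrates that mechanism and is not code from this pull request; the ExampleScenario class, its step methods and its info() implementation are made up (the real test classes inherit info() from the Marvin test case), and it assumes Python 2 with nuage_lib and its marvin dependency importable.

from nuage_lib import GherkinMetaClass


class ExampleScenario(object):
    # Python 2 style metaclass hook, matching the test code in this PR
    __metaclass__ = GherkinMetaClass

    def info(self, msg):
        # the gherkin decorator reports each step through obj.info()
        print(msg)

    def given_a_precondition(self):
        """Given a precondition"""  # a docstring, if present, becomes the step text
        return True

    def when_I_double(self, value):
        # no docstring: the step text is derived from the method name
        return value * 2

    def then_the_result_is(self, result, expected):
        assert result == expected, "unexpected result"


scenario = ExampleScenario()
scenario.given_a_precondition()
result = scenario.when_I_double(21)
scenario.then_the_result_is(result, 42)
# each step prints "=G= <step>" and then "[SUCCESS]" (or "[FAILED]" with the
# exception) exactly as implemented by the gherkin decorator above
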
diff --git a/test/integration/plugins/nuagevsp/test_nuage_configdrive.py b/test/integration/plugins/nuagevsp/test_nuage_configdrive.py
index 53a22bcb1a7..731694822f0 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_configdrive.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_configdrive.py
@@ -22,28 +22,21 @@
 import base64
 import copy
 import os
-import tempfile
 import threading
 
-import sys
-import time
 from datetime import datetime
-from marvin.cloudstackAPI import updateTemplate, resetSSHKeyForVirtualMachine
 from marvin.lib.base import (Account,
-                             createVlanIpRange,
-                             listVlanIpRanges,
                              NetworkServiceProvider,
                              PublicIpRange,
                              PublicIPAddress,
                              VirtualMachine)
-from marvin.lib.common import list_templates
-from marvin.lib.utils import random_gen
 # Import System Modules
 from nose.plugins.attrib import attr
+from nuage_lib import GherkinMetaClass
 
 # Import Local Modules
-from component.test_configdrive import MySSHKeyPair, ConfigDriveUtils
-from nuageTestCase import nuageTestCase, needscleanup
+from component.test_configdrive import ConfigDriveUtils
+from nuageTestCase import nuageTestCase
 
 NO_SUCH_FILE = "No such file or directory"
 
@@ -53,6 +46,8 @@ class TestNuageConfigDrive(nuageTestCase, ConfigDriveUtils):
     using configDrive with Nuage VSP SDN plugin
     """
 
+    __metaclass__ = GherkinMetaClass
+
     class StartVM(threading.Thread):
 
         def __init__(self, nuagetestcase, network, index):
@@ -111,10 +106,10 @@ def __init__(self, nuagetestcase, vm, **kwargs):
 
         def run(self):
             self.expected_user_data = "hello world vm %s" % self.vm.name
-            user_data = base64.b64encode(self.expected_user_data)
             self.end = None
             self.start = datetime.now()
-            self.vm.update(self.nuagetestcase.api_client, userdata=user_data)
+            self.nuagetestcase.when_I_update_userdata(self.vm,
+                                                      self.expected_user_data)
             self.end = datetime.now()
             self.nuagetestcase.debug("[Concurrency]Update userdata idx=%d "
                                      "for vm: %s. Duration in seconds: %s " %
@@ -146,12 +141,7 @@ def __init__(self, nuagetestcase, vm, **kwargs):
 
         def run(self):
             self.start = datetime.now()
-            self.vm.password = self.vm.resetPassword(
-                self.nuagetestcase.api_client)
-            self.nuagetestcase.debug("[Concurrency]Password reset to - %s"
-                                     % self.vm.password)
-            self.nuagetestcase.debug("[Concurrency]VM - %s password - %s !"
-                                     % (self.vm.name, self.vm.password))
+            self.nuagetestcase.when_I_reset_the_password(self.vm)
             self.end = datetime.now()
             self.nuagetestcase.debug("[Concurrency]Reset password for vm: %s. "
                                      "Duration in seconds: %s "
@@ -174,6 +164,10 @@ def get_password(self):
         def get_name():
             return "reset password"
 
+    def __init__(self, methodName='runTest'):
+        super(TestNuageConfigDrive, self).__init__(methodName)
+        ConfigDriveUtils.__init__(self)
+
     @classmethod
     def setUpClass(cls):
         super(TestNuageConfigDrive, cls).setUpClass()
@@ -188,6 +182,7 @@ def setUp(self):
                                       )
         self.tmp_files = []
         self.cleanup = [self.account]
+        self.generate_ssh_keys()
         return
 
     def tearDown(self):
@@ -195,14 +190,29 @@ def tearDown(self):
         for tmp_file in self.tmp_files:
             os.remove(tmp_file)
 
-        self.updateTemplate(False)
+        self.update_template(passwordenabled=False)
         return
 
-    def validate_firewall_rule(self, fw_rule):
+    def validate_acl_rule(self, fw_rule):
         self.verify_vsd_firewall_rule(fw_rule)
 
     def validate_StaticNat_rule_For_VM(self, public_ip, network, vm):
-        self.verify_vsd_floating_ip(network, vm, public_ip.ipaddress)
+        self.verify_vsd_floating_ip(network, vm, public_ip.ipaddress, self.vpc)
+
+    def validate_vm_networking(self, vm):
+        self.verify_vsd_vm(vm)
+
+    def validate_network_networking(self, network, vpc):
+        self.verify_vsd_network(self.domain.id, network, vpc=vpc)
+
+    def validate_shared_networking(self, network, vm):
+        # Verify shared Network and VM in VSD
+        subnet_id = self.get_subnet_id(network.id, network.gateway)
+
+        self.verify_vsd_shared_network(self.domain.id, network,
+                                       gateway=network.gateway)
+        self.verify_vsd_enterprise_vm(self.domain.id, network, vm,
+                                      sharedsubnetid=subnet_id)
 
     def _get_test_data(self, key):
         return self.test_data["nuagevsp"][key]
@@ -213,39 +223,89 @@ def get_configdrive_provider(self):
             name="ConfigDrive",
             physicalnetworkid=self.vsp_physical_network.id)[0]
 
-    def create_guest_vm(self, networks, acl_item=None,
-                        vpc=None, keypair=None):
-        vm = self.create_VM(
-            networks,
-            testdata=self.test_data["virtual_machine_userdata"],
-            keypair=keypair)
-        # Check VM
-        self.check_VM_state(vm, state="Running")
-        self.verify_vsd_vm(vm)
-        # Check networks
-        network_list = []
-        if isinstance(networks, list):
-            for network in networks:
-                network_list.append(network)
-        else:
-            network_list.append(networks)
+    def get_vpc_offering_name(self):
+        return "vpc_offering_configdrive_withoutdns"
 
-        for network in network_list:
-            self.validate_Network(network, state="Implemented")
-            self.verify_vsd_network(self.domain.id, network, vpc=vpc)
+    def get_network_offering_name(self):
+        return "isolated_configdrive_network_offering_withoutdns"
 
-        if acl_item is not None:
-            self.verify_vsd_firewall_rule(acl_item)
-        return vm
+    def get_network_offering_name_for_vpc(self):
+        return "vpc_network_offering_configdrive_withoutdns"
 
     # =========================================================================
     # ---                    Gherkin style helper methods                   ---
     # =========================================================================
 
+    def then_vr_is_as_expected(self, network):
+        """Then there is a VR or not in network {network.id}"""
+        if "Dns" in [s.name for s in network.service]:
+            vr = self.check_Router_state(network=network, state="Running")
+            self.verify_vsd_router(vr)
+        else:
+            with self.assertRaises(Exception):
+                self.get_Router(network)
+            self.debug("+++Verified no VR is spawned for this network ")
+
+    def when_I_change_the_network_offering_to(self, network, offering_name):
+        updated_network =\
+            self.upgrade_Network(self._get_test_data(offering_name), network)
+        network.service = updated_network.service
+
     # =========================================================================
     # ---                            TEST CASES                             ---
     # =========================================================================
 
+    @attr(tags=["advanced", "nuagevsp", "isonw"], required_hardware="true")
+    def test_nuage_config_drive_isolated_network_with_vr(self):
+        self.debug("+++Test user data & password reset functionality "
+                   "using configdrive in an Isolated network with VR")
+
+        self.given_config_drive_provider_is("Enabled")
+        self.given_template_password_enabled_is(True)
+        self.given_a_network_offering("isolated_configdrive_network_offering")
+        create_vrnetwork =\
+            self.when_I_create_a_network_with_that_offering(gateway='10.1.3.1')
+        self.then_the_network_is_successfully_created(create_vrnetwork)
+        self.then_the_network_has(create_vrnetwork, state="Allocated")
+
+        vrnetwork = create_vrnetwork.network
+
+        vm = self.when_I_deploy_a_vm(vrnetwork)
+        self.then_vr_is_as_expected(network=vrnetwork)
+
+        # We need to have the vm password
+        self.when_I_reset_the_password(vm)
+        public_ip = self.when_I_create_a_static_nat_ip_to(vm, vrnetwork)
+
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        self.update_and_validate_userdata(vm, "helloworld vm", public_ip)
+        self.then_config_drive_is_as_expected(vm, public_ip)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart Isolated network without"
+                   " cleanup...")
+        self.when_I_restart_the_network_with(vrnetwork, cleanup=False)
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart Isolated network with"
+                   " cleanup...")
+        self.when_I_restart_the_network_with(vrnetwork, cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        self.debug("+++ Upgrade offering of created Isolated network with "
+                   "an offering which removes the VR...")
+        self.when_I_change_the_network_offering_to(
+            vrnetwork, "isolated_configdrive_network_offering_withoutdns")
+        self.then_vr_is_as_expected(network=vrnetwork)
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        vm.delete(self.api_client, expunge=True)
+        vrnetwork.delete(self.api_client)
+
     @attr(tags=["advanced", "nuagevsp", "isonw"], required_hardware="true")
     def test_nuage_configdrive_isolated_network(self):
         """Test Configdrive as provider for isolated Networks
@@ -253,6 +313,12 @@ def test_nuage_configdrive_isolated_network(self):
            with Nuage VSP SDN plugin
         """
 
+        # 1. Given ConfigDrive provider is disabled in zone
+        #    And a network offering which has
+        #      user data provided by ConfigDrive
+        #    Then creating an Isolated Network
+        #    using that network offering fails
+
         # 2. Given ConfigDrive provider is enabled in zone
         #    And a network offering which has
         #    * user data provided by ConfigDrive
@@ -260,427 +326,292 @@ def test_nuage_configdrive_isolated_network(self):
         #    When I create an Isolated Network using that network offering
         #    Then the network is successfully created,
         #    And is in the "Allocated" state.
-        #
+
         # 3. When I deploy a VM in the created Isolated network with user data,
         #    Then the Isolated network state is changed to "Implemented"
         #    And the VM is successfully deployed and is in the "Running" state
         #    And there is no VR is deployed.
         # 4. And the user data in the ConfigDrive device is as expected
         # 5. And the vm's password in the ConfigDrive device is as expected
-        #
+
         # 6. When I stop, reset the password, and start the VM
         # 7. Then I can login into the VM using the new password.
         # 8. SSH into the VM for verifying its new password
         #     after its password reset.
+
         # 9. Verify various scenarios and check the data in configdriveIso
         # 10. Delete all the created objects (cleanup).
 
-        for zone in self.zones:
-            self.debug("Zone - %s" % zone.name)
-            # Get Zone details
-            self.getZoneDetails(zone=zone)
-            # Configure VSD sessions
-            self.configureVSDSessions()
-
-            # 1. Given ConfigDrive provider is disabled in zone
-            #    And a network offering which has
-            #      user data provided by ConfigDrive
-            #    Then creating an Isolated Network
-            #    using that network offering fails
-
-            self.debug("+++Testing configdrive in an Isolated network fails..."
-                       "as provider configdrive is still disabled...")
-            self.update_provider_state("Disabled")
-            create_network = self.verify_network_creation(
-                offering_name="isolated_configdrive_network_offering_"
-                              "withoutdns",
-                gateway='10.1.1.1')
-            self.assertFalse(create_network.success,
-                             'Network found success = %s, expected success =%s'
-                             % (str(create_network.success), 'False'))
-
-
-
-            self.debug("+++Test user data & password reset functionality "
-                       "using configdrive in an Isolated network without VR")
-            self.update_provider_state("Enabled")
-            create_network1 = self.verify_network_creation(
-                offering=create_network.offering,
-                gateway='10.1.1.1')
-            self.assertTrue(create_network1.success,
-                            'Network found success = %s, expected success = %s'
-                            % (str(create_network1.success), 'True'))
-            self.validate_Network(create_network1.network, state="Allocated")
-            create_network2 = self.verify_network_creation(
-                offering=create_network.offering,
-                gateway='10.1.2.1')
-            self.assertTrue(create_network2.success,
-                            'Network found success = %s,expected success = %s'
-                            % (str(create_network2.success), 'True'))
-            self.validate_Network(create_network2.network, state="Allocated")
-            self.update_password_enable_in_template(True)
-
-            self.debug("+++Deploy VM in the created Isolated network "
-                       "with as user data provider configdrive without VR")
-
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-            vm1 = self.create_guest_vm(create_network1.network,
-                                       keypair=self.keypair.name)
+        self.debug("+++ Scenario: creating an Isolated network with "
+                   "config drive fails when config drive provider is "
+                   "disabled.")
+        self.given_config_drive_provider_is("Disabled")
+        self.given_a_network_offering_with_configdrive()
+        self.then_creating_a_network_with_that_offering_fails()
+
+        self.debug("+++ Preparation Scenario: "
+                   "creating an Isolated networks with "
+                   "config drive when config drive provider is "
+                   "enabled.")
+
+        self.given_config_drive_provider_is("Enabled")
+
+        create_network1 = self.when_I_create_a_network_with_that_offering()
+        self.then_the_network_is_successfully_created(create_network1)
+        self.then_the_network_has(create_network1, state="Allocated")
+
+        create_network2 = self.when_I_create_a_network_with_that_offering()
+        self.then_the_network_is_successfully_created(create_network2)
+        self.then_the_network_has(create_network2, state="Allocated")
+
+        network1 = create_network1.network
+        network2 = create_network2.network
+
+        self.given_template_password_enabled_is(True)
+
+        self.debug("+++Deploy VM in the created Isolated network "
+                   "with user data provider as configdrive")
+
+        vm1 = self.when_I_deploy_a_vm_with_keypair_in(network1)
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm1, network1)
+
+        self.then_vr_is_as_expected(network=network1)
+        self.then_config_drive_is_as_expected(
+            vm1, public_ip_1,
+            metadata=True)
+
+        self.update_and_validate_userdata(vm1, "helloworld vm1", public_ip_1)
+        self.update_and_validate_sshkeypair(vm1, public_ip_1)
+
+        # =====================================================================
+
+        self.debug("Adding a non-default nic to the VM "
+                   "making it a multi-nic VM...")
+        self.plug_nic(vm1, network2)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        with self.stopped_vm(vm1):
+            self.when_I_reset_the_password(vm1)
+            self.when_I_update_userdata(vm1, "hellomultinicvm1")
+
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        # Test using network2 as default network
+        # =====================================================================
+
+        self.debug("updating non-default nic as the default nic "
+                   "of the multi-nic VM and enable staticnat...")
+        self.update_default_nic(vm1, network2)
+
+        public_ip_2 = \
+            self.when_I_create_a_static_nat_ip_to(vm1, network2)
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_2, metadata=True)
+
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_2)
+
+        user_data = "hellomultinicvm1again"
+        self.update_and_validate_userdata(vm1, user_data, public_ip_2)
+
+        self.debug("Updating the default nic of the multi-nic VM, "
+                   "deleting the non-default nic...")
+        self.update_default_nic(vm1, network1)
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+
+        self.delete(public_ip_2)
+        self.unplug_nic(vm1, network2)
+
+        # =====================================================================
+        # Another multi-nic VM
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Reset password and update userdata on a multi-nic VM")
+        multinicvm1 = self.when_I_deploy_a_vm([network2, network1])
+        self.when_I_reset_the_password(multinicvm1)
+        public_ip_3 = self.when_I_create_a_static_nat_ip_to(multinicvm1,
+                                                            network2)
+        self.then_config_drive_is_as_expected(
+            multinicvm1, public_ip_3,
+            metadata=True)
+
+        user_data2 = "hello multinicvm1"
+        self.update_and_validate_userdata(multinicvm1, user_data2, public_ip_3)
+
+        self.delete(multinicvm1, expunge=True)
+        self.delete(public_ip_3)
+        self.delete(network2)
+
+        # =====================================================================
+        # Network restart tests
+        # =====================================================================
+
+        self.debug("+++ Scenario: "
+                   "verify config drive after restarting the "
+                   "Isolated network without cleanup...")
+        self.when_I_reset_the_password(vm1)
+        self.when_I_restart_the_network_with(network1, cleanup=False)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restarting the "
+                   "Isolated network with cleanup...")
+        self.when_I_restart_the_network_with(network1, cleanup=True)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        # Nuage --
+        #   Update offering to VR
+        # =====================================================================
+        self.debug("+++ Upgrade offering of created Isolated network with "
+                   "a dns offering which spins a VR")
+        self.upgrade_Network(self.test_data["nuagevsp"][
+                                 "isolated_configdrive_network_offering"],
+                             create_network1.network)
+        vr = self.get_Router(create_network1.network)
+        self.check_Router_state(vr, state="Running")
+        # VSD verification
+        self.verify_vsd_network(self.domain.id, create_network1.network)
+        self.verify_vsd_router(vr)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after reboot")
+        vm1.reboot(self.api_client)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm1, "hello afterboot", public_ip_1)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after migrate")
+        self.migrate_VM(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+        self.debug("Updating userdata after migrating VM - %s" % vm1.name)
+        self.update_and_validate_userdata(vm1, "hello after migrate",
+                                          public_ip_1)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after stop/start")
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm1, "hello afterstopstart",
+                                          public_ip_1)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after delete/recover")
+        self.delete_and_recover_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_1,
+                                              metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Update userdata fails when ConfigDrive is disabled")
+        self.given_config_drive_provider_is("Disabled")
+        with self.assertRaises(Exception):
+            self.when_I_update_userdata(vm1, "hi with provider state Disabled")
+        self.given_config_drive_provider_is("Enabled")
+
+        self.delete(vm1, expunge=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Update userdata when template is not password enabled")
+        self.update_template(passwordenabled=False)
+        vm1 = self.when_I_deploy_a_vm_with_keypair_in(network1)
+
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm1, network1)
+
+        self.update_and_validate_userdata(vm1,
+                                          "This is sample data",
+                                          public_ip_1,
+                                          metadata=True)
 
-            with self.assertRaises(Exception):
-                self.get_Router(create_network1)
-            self.debug("+++Verified no VR is spawned for this network ")
-            # We need to have the vm password
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-
-            public_ip_1 = self.acquire_PublicIPAddress(create_network1.network)
-            self.create_and_verify_fip_and_fw(vm1, public_ip_1,
-                                              create_network1.network)
-
-            self.verify_config_drive_content(
-                vm1, public_ip_1,
-                self.PasswordTest(vm1.password),
-                metadata=True,
-                userdata=self.test_data[
-                    "virtual_machine_userdata"]["userdata"],
-                ssh_key=self.keypair)
-
-            expected_user_data1 = self.update_userdata(vm1, "helloworld vm1")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(True),
-                                             userdata=expected_user_data1)
-
-            self.generate_ssh_keys()
-            self.update_sshkeypair(vm1)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(True),
-                                             metadata=True,
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-            # After sshkey reset we need to have the vm password again
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-
-            self.debug("Adding a non-default nic to the VM "
-                       "making it a multi-nic VM...")
-            self.nic_operation_VM(vm1, create_network2.network,
-                                  operation="add")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             metadata=True,
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hellomultinicvm1")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-
-            self.debug("updating non-default nic as the default nic "
-                       "of the multi-nic VM and enable staticnat...")
-            self.nic_operation_VM(vm1,
-                                  create_network2.network, operation="update")
-
-            public_ip_2 = \
-                self.acquire_PublicIPAddress(create_network2.network)
-            self.create_and_verify_fip_and_fw(vm1, public_ip_2,
-                                              create_network2.network)
-            vm1.stop(self.api_client)
-            vm1.start(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip_2,
-                                             self.PasswordTest(False),
-                                             metadata=True,
-                                             userdata=expected_user_data1)
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-            self.verify_config_drive_content(vm1, public_ip_2,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hellomultinicvm1")
-            self.verify_config_drive_content(vm1, public_ip_2,
-                                             self.PasswordTest(True),
-                                             userdata=expected_user_data1)
-
-            self.debug("Updating the default nic of the multi-nic VM, "
-                       "deleting the non-default nic...")
-            self.nic_operation_VM(vm1,
-                                  create_network1.network, operation="update")
-            vm1.stop(self.api_client)
-            vm1.start(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(False),
-                                             metadata=True,
-                                             userdata=expected_user_data1)
-
-            multinicvm1 = self.create_guest_vm([create_network2.network,
-                                                create_network1.network])
-            multinicvm1.password = multinicvm1.resetPassword(self.api_client)
-            self.debug("MultiNICVM Password reset to - %s"
-                       % multinicvm1.password)
-            self.debug("MultiNICVM - %s password - %s !"
-                       % (multinicvm1.name, multinicvm1.password))
-
-            public_ip_3 = self.acquire_PublicIPAddress(create_network2.network)
-            self.create_and_verify_fip_and_fw(multinicvm1, public_ip_3,
-                                              create_network2.network)
-            self.verify_config_drive_content(
-                multinicvm1, public_ip_3,
-                self.PasswordTest(multinicvm1.password),
-                metadata=True,
-                userdata=self.test_data[
-                    "virtual_machine_userdata"]["userdata"])
-            expected_user_data2 = self.update_userdata(multinicvm1,
-                                                       "hello multinicvm1")
-            self.verify_config_drive_content(multinicvm1, public_ip_3,
-                                             self.PasswordTest(True),
-                                             userdata=expected_user_data2)
-
-            multinicvm1.delete(self.api_client, expunge=True)
-            public_ip_3.delete(self.api_client)
-            public_ip_2.delete(self.api_client)
-            self.nic_operation_VM(vm1,
-                                  create_network2.network, operation="remove")
-            create_network2.network.delete(self.api_client)
-
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-
-            self.debug("+++ Restarting the created Isolated network without "
-                       "VR without cleanup...")
-            create_network1.network.restart(self.api_client, cleanup=False)
-            self.validate_Network(create_network1.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ Restarting the created Isolated network without "
-                       "VR with cleanup...")
-            create_network1.network.restart(self.api_client, cleanup=True)
-            self.validate_Network(create_network1.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ Upgrade offering of created Isolated network with "
-                       "a dns offering which spins a VR")
-            self.upgrade_Network(self.test_data["nuagevsp"][
-                                     "isolated_configdrive_network_offering"],
-                                 create_network1.network)
-            vr = self.get_Router(create_network1.network)
-            self.check_Router_state(vr, state="Running")
-            # VSD verification
-            self.verify_vsd_network(self.domain.id, create_network1.network)
-            self.verify_vsd_router(vr)
-
-            self.debug("+++Test user data & password reset functionality "
-                       "using configdrive in an Isolated network with VR")
-            create_vrnetwork1 = self.verify_network_creation(
-                offering_name="isolated_configdrive_network_offering",
-                gateway='10.1.3.1')
-            self.assertTrue(create_vrnetwork1.success,
-                            'Network found success = %s, expected success = %s'
-                            % (str(create_vrnetwork1.success), 'True'))
-            self.validate_Network(create_vrnetwork1.network, state="Allocated")
-            self.debug("+++Deploying a VM in the created Isolated network "
-                       "with as user data provider configdrive with VR")
-            vm2 = self.create_guest_vm(create_vrnetwork1.network)
-
-            vr2 = self.get_Router(create_vrnetwork1.network)
-            self.check_Router_state(vr2, state="Running")
-
-            # VSD verification
-            self.verify_vsd_network(self.domain.id, create_vrnetwork1.network)
-            self.verify_vsd_router(vr2)
-            self.debug("+++Verified VR is spawned for this network ")
-
-            # We need to have the vm password
-            vm2.password = vm2.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm2.password)
-            self.debug("VM2 - %s password - %s !" %
-                       (vm2.name, vm2.password))
-            public_ip_3 = self.acquire_PublicIPAddress(
-                create_vrnetwork1.network)
-            self.create_and_verify_fip_and_fw(vm2, public_ip_3,
-                                              create_vrnetwork1.network)
-
-            self.verify_config_drive_content(
-                vm2, public_ip_3,
-                self.PasswordTest(vm2.password),
-                metadata=True,
-                userdata=self.test_data[
-                    "virtual_machine_userdata"]["userdata"])
-
-            expected_user_data2 = self.update_userdata(vm2, "helloworld vm2")
-            self.verify_config_drive_content(vm2, public_ip_3,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2)
-
-            self.debug("+++ Restarting the created Isolated network with "
-                       "VR without cleanup...")
-            create_vrnetwork1.network.restart(self.api_client, cleanup=False)
-            self.validate_Network(create_vrnetwork1.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm2, public_ip_3,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            self.debug("+++ Restarting the created Isolated network with "
-                       "VR with cleanup...")
-            create_vrnetwork1.network.restart(self.api_client, cleanup=True)
-            self.validate_Network(create_vrnetwork1.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm2, public_ip_3,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            self.debug("+++ Upgrade offering of created Isolated network with "
-                       "an offering which removes the VR...")
-            self.upgrade_Network(
-                self.test_data["nuagevsp"][
-                    "isolated_configdrive_network_offering_withoutdns"],
-                create_vrnetwork1.network)
-            with self.assertRaises(Exception):
-                self.get_Router(create_vrnetwork1.network)
-
-            self.verify_config_drive_content(vm2, public_ip_3,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-            vm2.delete(self.api_client, expunge=True)
-            create_vrnetwork1.network.delete(self.api_client)
-
-            self.debug("+++Verifying userdata after rebootVM - %s" % vm1.name)
-            vm1.reboot(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             metadata=True,
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata for VM - %s" % vm1.name)
-            expected_user_data1 = self.update_userdata(vm1, "hello afterboot")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-            self.debug("Resetting password for VM - %s" % vm1.name)
-            self.reset_password(vm1)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password))
-
-            self.debug("+++ Migrating one of the VMs in the created Isolated "
-                       "network to another host, if available...")
-            self.migrate_VM(vm1)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata after migrating VM - %s" % vm1.name)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hello after migrate")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1)
-            self.debug("Resetting password for VM - %s" % vm1.name)
-            self.reset_password(vm1)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password))
-
-            self.debug("+++Verify userdata after stopstartVM - %s" % vm1.name)
-            vm1.stop(self.api_client)
-            vm1.start(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata for VM - %s" % vm1.name)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hello afterstopstart")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1)
-            self.debug("Resetting password for VM - %s" % vm1.name)
-            self.reset_password(vm1)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(vm1.password))
-
-            self.debug("+++ Verify userdata after VM recover- %s" % vm1.name)
-            vm1.delete(self.api_client, expunge=False)
-            self.debug("Recover VM - %s" % vm1.name)
-            vm1.recover(self.api_client)
-            vm1.start(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-            self.update_provider_state("Disabled")
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hello after recover")
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ When template is not password enabled, "
-                       "verify configdrive of VM - %s" % vm1.name)
-            vm1.delete(self.api_client, expunge=True)
-            self.update_provider_state("Enabled")
-            self.updateTemplate(False)
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-            vm1 = self.create_guest_vm(create_network1.network,
-                                       keypair=self.keypair.name)
-
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "This is sample data")
-            public_ip_1 = \
-                self.acquire_PublicIPAddress(create_network1.network)
-            self.create_and_verify_fip_and_fw(vm1, public_ip_1,
-                                              create_network1.network)
-            self.verify_config_drive_content(vm1, public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-            vm1.delete(self.api_client, expunge=True)
-            create_network1.network.delete(self.api_client)
+    @attr(tags=["advanced", "nuagevsp", "vpc"], required_hardware="true")
+    def test_nuage_configdrive_vpc_network_with_vr(self):
+        self.debug("Testing user data & password reset functionality "
+                   "using configdrive in a VPC network with VR...")
+
+        self.given_config_drive_provider_is("Enabled")
+        self.given_template_password_enabled_is(True)
+        self.given_a_vpc_with_offering("vpc_offering_configdrive_withdns")
+        self.given_a_network_offering(
+                "vpc_network_offering_configdrive_withdns")
+        create_tier = self.when_I_create_a_vpc_tier_with_that_offering(
+            gateway='10.1.3.1')
+        self.then_the_network_is_successfully_created(create_tier)
+        self.then_the_network_has(create_tier, state="Implemented")
+
+        tier = create_tier.network
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Deploy VM in the VPC tier with user data")
+        vm = self.when_I_deploy_a_vm(tier)
+        self.then_vr_is_as_expected(network=tier)
+
+        public_ip = \
+            self.when_I_create_a_static_nat_ip_to(vm, tier)
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip)
+
+        self.update_and_validate_userdata(vm, "helloworld vm2", public_ip,
+                                          metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Restarting the created vpc without cleanup...")
+        self.when_I_restart_the_vpc_with(cleanup=False)
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after Restart VPC with cleanup...")
+        self.when_I_restart_the_vpc_with(cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip,
+                                              metadata=True, reconnect=False)
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after Restart tier without cleanup...")
+        self.when_I_restart_the_network_with(tier, cleanup=False)
+        self.then_config_drive_is_as_expected(vm, public_ip,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart tier with cleanup...")
+        self.when_I_restart_the_network_with(tier, cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip,
+                                              metadata=True, reconnect=False)
+
+        self.debug("+++ Upgrade offering of created VPC network with "
+                   "an offering which removes the VR...")
+        self.when_I_change_the_network_offering_to(
+            tier, "vpc_network_offering_configdrive_withoutdns")
+        self.then_vr_is_as_expected(network=tier)
+        self.then_config_drive_is_as_expected(vm, public_ip,
+                                              metadata=True, reconnect=False)
+
+        vm.delete(self.api_client, expunge=True)
+        tier.delete(self.api_client)
+        self.vpc.delete(self.api_client)
 
     @attr(tags=["advanced", "nuagevsp", "vpc"], required_hardware="true")
     def test_nuage_configdrive_vpc_network(self):
@@ -690,470 +621,186 @@ def test_nuage_configdrive_vpc_network(self):
            with Nuage VSP SDN plugin
         """
 
-        # 1. Verify VPC Network creation with ConfigDrive fails
-        #    as ConfigDrive is disabled as provider
-        # 2. Create a VPC Network with Nuage VSP VPC tier Network
-        #    offering specifying ConfigDrive as serviceProvider for userdata,
-        #    make sure no Dns is in the offering so no VR is spawned.
-        #    check if it is successfully created and is in "Allocated" state.
-        # 3. Deploy a VM in the created VPC tier network with user data,
-        #    check if the Isolated network state is changed to "Implemented",
-        #    and the VM is successfully deployed and is in "Running" state.
-        #    Check that no VR is deployed.
-        # 4. SSH into the deployed VM and verify its user data in the iso
-        #    (expected user data == actual user data).
-        # 5. Verify that the guest VM's password in the iso.
-        # 6. Reset VM password, and start the VM.
-        # 7. Verify that the new guest VM template is password enabled by
-        #    checking the VM's password (password != "password").
-        # 8. SSH into the VM for verifying its new password
-        #     after its password reset.
-        # 9. Verify various scenarios and check the data in configdrive iso
+        # 1. Given ConfigDrive provider is disabled in zone
+        #    And a network offering for VPC which has
+        #      user data provided by ConfigDrive
+        #    And a VPC
+        #    Then creating a VPC Tier in the VPC
+        #    using that network offering fails
+
+        # 2. Given ConfigDrive provider is enabled in zone
+        #    And a network offering for VPC which has
+        #      user data provided by ConfigDrive
+        #    And a VPC
+        #    When I create a VPC Tier in the VPC using that network offering
+        #    Then the network is successfully created,
+        #    And is in the "Allocated" state.
+
+        # 3. When I deploy a VM in the created VPC tier with user data,
+        #    Then the network state is changed to "Implemented"
+        #    And the VM is successfully deployed and is in the "Running" state
+
+        # 4. And the user data in the ConfigDrive device is as expected
+        # 5. And the vm password in the ConfigDrive device is as expected
+
+        # 6. When I stop, reset the password, and start the VM
+        # 7. Then I can log in to the VM using the new password.
+        # 8. And the vm password in the ConfigDrive device is the new one
+
+        # 9. Verify various scenarios and check the data in the configdrive ISO
         # 10. Delete all the created objects (cleanup).
 
-        for zone in self.zones:
-            self.debug("Zone - %s" % zone.name)
-            # Get Zone details
-            self.getZoneDetails(zone=zone)
-            # Configure VSD sessions
-            self.configureVSDSessions()
-
-            self.update_provider_state("Disabled")
-            create_vpc = self.verify_vpc_creation(
-                offering_name="vpc_offering_configdrive_withoutdns")
-            self.assertTrue(create_vpc.success,
-                            "Vpc found success = %s, expected success = %s"
-                            % (str(create_vpc.success), 'True'))
-            acl_list = self.create_NetworkAclList(
-                name="acl", description="acl", vpc=create_vpc.vpc)
-            acl_item = self.create_NetworkAclRule(
-                self.test_data["ingress_rule"], acl_list=acl_list)
-
-            self.debug("+++Testing configdrive in a VPC Tier network fails..."
-                       "as provider configdrive is still disabled...")
-            create_networkfails = \
-                self.verify_network_creation(
-                    offering_name="vpc_network_offering_configdrive_"
-                                  "withoutdns",
-                    gateway='10.1.1.1',
-                    vpc=create_vpc.vpc,
-                    acl_list=acl_list)
-            self.assertFalse(create_networkfails.success,
-                             "Create Network found success = %s, "
-                             "expected success = %s"
-                             % (str(create_networkfails.success), 'False'))
-            self.debug("Testing user data&password reset functionality using"
-                       "configdrive in a VPC network without VR...")
-            self.update_provider_state("Enabled")
-
-            create_tiernetwork = \
-                self.verify_network_creation(
-                    offering=create_networkfails.offering,
-                    gateway='10.1.1.1',
-                    vpc=create_vpc.vpc,
-                    acl_list=acl_list)
-            self.assertTrue(create_tiernetwork.success,
-                            "Create Network found success = %s, "
-                            "expected success = %s"
-                            % (str(create_tiernetwork.success), 'True'))
-            self.validate_Network(create_tiernetwork.network,
-                                  state="Implemented")
-
-            create_tiernetwork2 = \
-                self.verify_network_creation(
-                    offering=create_networkfails.offering,
-                    gateway='10.1.2.1',
-                    vpc=create_vpc.vpc,
-                    acl_list=acl_list)
-            self.assertTrue(create_tiernetwork2.success,
-                            'Network found success= %s, expected success= %s'
-                            % (str(create_tiernetwork2.success), 'True'))
-            self.validate_Network(create_tiernetwork2.network,
-                                  state="Implemented")
-
-            self.update_password_enable_in_template(True)
-
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-            vm = self.create_guest_vm(create_tiernetwork.network,
-                                      acl_item,
-                                      vpc=create_vpc.vpc,
-                                      keypair=self.keypair.name)
-
-            vpc_public_ip_1 = \
-                self.acquire_PublicIPAddress(create_tiernetwork.network,
-                                             create_vpc.vpc)
-            self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1,
-                                             create_tiernetwork.network)
-
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(True),
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            expected_user_data = self.update_userdata(vm, "helloworld vm1")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(True),
-                                             metadata=True,
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-
-            self.debug("Resetting password for VM - %s" % vm.name)
-            self.reset_password(vm)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-
-            self.generate_ssh_keys()
-            self.update_sshkeypair(vm)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(True),
-                                             metadata=True,
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-            # After sshkey reset we need to have the vm password again
-            vm.password = vm.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm.name, vm.password))
-
-            self.debug("+++ Restarting the created vpc without "
-                       "cleanup...")
-            self.restart_Vpc(create_vpc.vpc, cleanup=False)
-            self.validate_Vpc(create_vpc.vpc, state="Enabled")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Adding a non-default nic to the VM "
-                       "making it a multi-nic VM...")
-            self.nic_operation_VM(vm, create_tiernetwork2.network,
-                                  operation="add")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             metadata=True,
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-            vm.password = vm.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm.name, vm.password))
-
-            expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-
-            self.debug("updating non-default nic as the default nic "
-                       "of the multi-nic VM and enable staticnat...")
-            self.nic_operation_VM(vm,
-                                  create_tiernetwork2.network,
-                                  operation="update")
-
-            vpc_public_ip_2 = \
-                self.acquire_PublicIPAddress(create_tiernetwork2.network,
-                                             create_vpc.vpc)
-            self.create_StaticNatRule_For_VM(vm, vpc_public_ip_2,
-                                             create_tiernetwork2.network)
-            vm.stop(self.api_client)
-            vm.start(self.api_client)
-            self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                             self.PasswordTest(False),
-                                             metadata=True,
-                                             userdata=expected_user_data1)
-            vm.password = vm.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm.name, vm.password))
-            self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data1)
-            expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1")
-            self.verify_config_drive_content(vm, vpc_public_ip_2,
-                                             self.PasswordTest(True),
-                                             userdata=expected_user_data1)
-
-            self.debug("Updating the default nic of the multi-nic VM, "
-                       "deleting the non-default nic...")
-            self.nic_operation_VM(vm,
-                                  create_tiernetwork.network,
-                                  operation="update")
-            vm.stop(self.api_client)
-            vm.start(self.api_client)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(False),
-                                             metadata=True,
-                                             userdata=expected_user_data1)
-            vpc_public_ip_2.delete(self.api_client)
-            self.nic_operation_VM(vm,
-                                  create_tiernetwork2.network,
-                                  operation="remove")
-            create_tiernetwork2.network.delete(self.api_client)
-
-            vm.password = vm.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm.name, vm.password))
-
-            self.debug("+++ Restarting the created vpc with "
-                       "cleanup...")
-            self.restart_Vpc(create_vpc.vpc, cleanup=True)
-            self.validate_Vpc(create_vpc.vpc, state="Enabled")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ Restarting the created VPC Tier network without "
-                       "cleanup...")
-            create_tiernetwork.network.restart(self.api_client, cleanup=False)
-            self.validate_Network(create_tiernetwork.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ Restarting the created VPC Tier network with "
-                       "cleanup...")
-            create_tiernetwork.network.restart(self.api_client, cleanup=True)
-            self.validate_Network(create_tiernetwork.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Testing user data & password reset functionality "
-                       " using configdrive in a VPC network with VR...")
-            create_vrvpc = self.verify_vpc_creation(
-                offering_name="vpc_offering_configdrive_withdns")
-            self.assertTrue(create_vrvpc.success,
-                            'Vpc found success = %s, expected success = %s'
-                            % (str(create_vrvpc.success), 'True'))
-            acl_list2 = self.create_NetworkAclList(
-                name="acl", description="acl", vpc=create_vrvpc.vpc)
-            acl_item2 = self.create_NetworkAclRule(
-                self.test_data["ingress_rule"], acl_list=acl_list2)
-            create_vrnetwork = \
-                self.verify_network_creation(
-                    offering_name="vpc_network_offering_configdrive_withdns",
-                    gateway='10.1.3.1',
-                    vpc=create_vrvpc.vpc,
-                    acl_list=acl_list2)
-            self.assertTrue(create_vrnetwork.success,
-                            "Create Network found success = %s, "
-                            "expected success = %s"
-                            % (str(create_vrnetwork.success), 'True'))
-            self.validate_Network(create_vrnetwork.network,
-                                  state="Implemented")
-            vm2 = self.create_guest_vm(create_vrnetwork.network,
-                                       acl_item2,
-                                       vpc=create_vrvpc.vpc)
-            vr2 = self.get_Router(create_vrnetwork.network)
-            self.check_Router_state(vr2, state="Running")
-
-            # VSD verification
-            self.verify_vsd_network(self.domain.id, create_vrnetwork.network,
-                                    create_vrvpc.vpc)
-            self.verify_vsd_router(vr2)
-            self.debug("+++Verified VR is spawned for this network ")
-            # We need to have the vm password
-            vm2.password = vm2.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm2.password)
-            self.debug("VM2 - %s password - %s !" %
-                       (vm2.name, vm2.password))
-            vpc_public_ip_2 = \
-                self.acquire_PublicIPAddress(create_vrnetwork.network,
-                                             create_vrvpc.vpc)
-            self.create_StaticNatRule_For_VM(vm2, vpc_public_ip_2,
-                                             create_vrnetwork.network)
-
-            self.verify_config_drive_content(
-                vm2, vpc_public_ip_2,
-                self.PasswordTest(vm2.password),
-                metadata=True,
-                userdata=self.test_data["virtual_machine_userdata"][
-                    "userdata"])
-
-            expected_user_data2 = self.update_userdata(vm2, "helloworld vm2")
-            self.verify_config_drive_content(vm2, vpc_public_ip_2,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2)
-
-            self.debug("+++ Restarting the created vpc without "
-                       "cleanup...")
-            self.restart_Vpc(create_vrvpc.vpc, cleanup=False)
-            self.validate_Vpc(create_vrvpc.vpc, state="Enabled")
-            self.verify_config_drive_content(vm2, vpc_public_ip_2,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            self.debug("+++ Restarting the created vpc with "
-                       "cleanup...")
-            self.restart_Vpc(create_vrvpc.vpc, cleanup=True)
-            self.validate_Vpc(create_vrvpc.vpc, state="Enabled")
-            self.verify_config_drive_content(vm2, vpc_public_ip_2,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            self.debug("+++ Restarting the created VPC Tier network without "
-                       "cleanup...")
-            create_vrnetwork.network.restart(self.api_client, cleanup=False)
-            self.validate_Network(create_vrnetwork.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm2, vpc_public_ip_2,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            self.debug("+++ Restarting the created VPC Tier network with "
-                       "cleanup...")
-            create_vrnetwork.network.restart(self.api_client, cleanup=True)
-            self.validate_Network(create_vrnetwork.network,
-                                  state="Implemented")
-            self.verify_config_drive_content(vm2, vpc_public_ip_2,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            self.debug("+++ Upgrade offering of created VPC network with "
-                       "an offering which removes the VR...")
-            self.upgrade_Network(self.test_data["nuagevsp"][
-                                     "vpc_network_offering_configdrive_"
-                                     "withoutdns"],
-                                 create_vrnetwork.network)
-
-            self.verify_config_drive_content(vm2, vpc_public_ip_2,
-                                             self.PasswordTest(vm2.password),
-                                             userdata=expected_user_data2,
-                                             metadata=True)
-
-            vm2.delete(self.api_client, expunge=True)
-            create_vrnetwork.network.delete(self.api_client)
-            create_vrvpc.vpc.delete(self.api_client)
-
-            self.debug("+++ Verify userdata after rebootVM - %s" % vm.name)
-            vm.reboot(self.api_client)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             metadata=True,
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata for VM - %s" % vm.name)
-            expected_user_data = self.update_userdata(vm,
-                                                      "hellovm after reboot")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-            self.debug("Resetting password for VM - %s" % vm.name)
-            self.reset_password(vm)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password))
-
-            self.debug("+++ Migrating one of the VMs in the created "
-                       "VPC Tier network to another host, if available...")
-            self.migrate_VM(vm)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata after migrating VM - %s" % vm.name)
-            expected_user_data = self.update_userdata(vm,
-                                                      "hellovm after migrate")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password),
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-            self.debug("Resetting password for VM - %s" % vm.name)
-            self.reset_password(vm)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password))
-
-            self.debug("+++ Verify userdata after stopstartVM - %s" % vm.name)
-            vm.stop(self.api_client)
-            vm.start(self.api_client)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata for VM - %s" % vm.name)
-            expected_user_data = self.update_userdata(vm,
-                                                      "hello after stopstart")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-            self.debug("Resetting password for VM - %s" % vm.name)
-            self.reset_password(vm)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(vm.password))
-
-            self.debug("+++ Verify userdata after recoverVM - %s" % vm.name)
-            vm.delete(self.api_client, expunge=False)
-            self.debug("Recover VM - %s" % vm.name)
-            vm.recover(self.api_client)
-            vm.start(self.api_client)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-            self.update_provider_state("Disabled")
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ When template is not password enabled "
-                       "verify configdrive of VM - %s" % vm.name)
-            vm.delete(self.api_client, expunge=True)
-            self.update_provider_state("Enabled")
-            self.updateTemplate(False)
-
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-            vm = self.create_guest_vm(create_tiernetwork.network,
-                                      acl_item,
-                                      vpc=create_vpc.vpc,
-                                      keypair=self.keypair.name)
-
-            expected_user_data = self.update_userdata(vm,
-                                                      "This is sample data")
-            vpc_public_ip_1 = \
-                self.acquire_PublicIPAddress(create_tiernetwork.network,
-                                             create_vpc.vpc)
-            self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1,
-                                             create_tiernetwork.network)
-            self.verify_config_drive_content(vm, vpc_public_ip_1,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-            vm.delete(self.api_client, expunge=True)
-            create_tiernetwork.network.delete(self.api_client)
+        self.debug("+++ Scenario: creating an VPC tier with "
+                   "config drive fails when config drive provider is "
+                   "disabled.")
+        self.given_a_vpc()
+        self.given_config_drive_provider_is("Disabled")
+        self.given_a_network_offering_for_vpc_with_configdrive()
+        self.then_creating_a_vpc_tier_with_that_offering_fails()
+
+        self.debug("+++ Preparation Scenario: "
+                   "Create 2 tier with config drive "
+                   "when config drive provider is enabled.")
+
+        self.given_config_drive_provider_is("Enabled")
+
+        create_network1 = self.when_I_create_a_vpc_tier_with_that_offering(
+            gateway='10.1.1.1')
+        self.then_the_network_is_successfully_created(create_network1)
+        self.then_the_network_has(create_network1, state="Implemented")
+
+        create_network2 = self.when_I_create_a_vpc_tier_with_that_offering(
+            gateway='10.1.2.1')
+        self.then_the_network_is_successfully_created(create_network2)
+        self.then_the_network_has(create_network2, state="Implemented")
+
+        network1 = create_network1.network
+        network2 = create_network2.network
+
+        self.given_template_password_enabled_is(True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Deploy VM in the Tier 1 with user data")
+        vm = self.when_I_deploy_a_vm(network1,
+                                     keypair=self.keypair.name)
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm, network1)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        self.update_and_validate_userdata(vm, "helloworld vm1",
+                                          public_ip_1,
+                                          metadata=True)
+
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
+
+        self.update_and_validate_sshkeypair(vm, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Restarting the created vpc without cleanup...")
+        self.when_I_restart_the_vpc_with(cleanup=False)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        # =====================================================================
+        self.debug("Adding a non-default nic to the VM "
+                   "making it a multi-nic VM...")
+        self.plug_nic(vm, network2)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+        with self.stopped_vm(vm):
+            self.when_I_reset_the_password(vm)
+            self.when_I_update_userdata(vm, "hellomultinicvm1")
+
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        self.unplug_nic(vm, network2)
+        self.delete(network2)
+
+        self.debug("+++ Scenario: "
+                   "verify config drive after Restart VPC with cleanup...")
+        self.when_I_restart_the_vpc_with(cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after Restart tier without cleanup...")
+        self.when_I_restart_the_network_with(network1, cleanup=False)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after restart tier with cleanup...")
+        self.when_I_restart_the_network_with(network1, cleanup=True)
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after reboot")
+        vm.reboot(self.api_client)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm, "hello reboot", public_ip_1)
+
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after migrate")
+        self.migrate_VM(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+        self.update_and_validate_userdata(vm, "hello migrate", public_ip_1)
+
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after stop/start")
+        self.stop_and_start_vm(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        self.update_and_validate_userdata(vm, "hello stop/start", public_ip_1)
+
+        self.when_I_reset_the_password(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after delete/recover")
+        self.delete_and_recover_vm(vm)
+        self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Verify configdrive when template is not password enabled")
+        self.given_config_drive_provider_is("Disabled")
+        self.then_config_drive_is_as_expected(vm, public_ip_1,
+                                              metadata=True, reconnect=False)
+        self.given_config_drive_provider_is("Enabled")
+
+        self.delete(vm, expunge=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Update Userdata on a VM that is not password enabled")
+
+        self.update_template(passwordenabled=False)
+
+        vm = self.when_I_deploy_a_vm(network1,
+                                     keypair=self.keypair.name)
+        public_ip_1 = \
+            self.when_I_create_a_static_nat_ip_to(vm, network1)
+        self.update_and_validate_userdata(vm, "This is sample data",
+                                          public_ip_1,
+                                          metadata=True)
+
+        self.delete(vm, expunge=True)
+        self.delete(network1)
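The refactored test above reads as one flat given/when/then scenario built from small step helpers (given_config_drive_provider_is, when_I_deploy_a_vm, then_config_drive_is_as_expected, and so on). The gherkin decorator those helpers carry comes from nuage_lib (see the import change further down in this diff); its actual implementation is not reproduced here, so the snippet below is only a minimal sketch of the pattern, assuming the decorator does little more than log a scenario-style banner around each step. All class, attribute and helper names in the sketch are hypothetical.

    import functools
    import logging

    def gherkin(step):
        """Illustrative step decorator: log a scenario-style banner around a
        given_/when_/then_ helper so the test output reads as a scenario."""
        @functools.wraps(step)
        def wrapper(*args, **kwargs):
            name = step.__name__.replace("_", " ")
            logging.info("=== step: %s ===", name)
            try:
                result = step(*args, **kwargs)
                logging.info("=== step passed: %s ===", name)
                return result
            except Exception:
                logging.info("=== step FAILED: %s ===", name)
                raise
        return wrapper

    class ConfigDriveScenarioSketch(object):
        # Hypothetical mixin showing how such steps compose; the real helpers
        # call CloudStack APIs and inspect the generated config drive ISO.
        @gherkin
        def given_config_drive_provider_is(self, state):
            self.provider_state = state

        @gherkin
        def then_config_drive_is_as_expected(self, vm, public_ip, metadata=False):
            # Stand-in only: the real helper SSHes in via public_ip, mounts the
            # config drive and compares password/userdata/metadata.
            return {"vm": vm, "ip": public_ip, "metadata": metadata}

    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        steps = ConfigDriveScenarioSketch()
        steps.given_config_drive_provider_is("Enabled")
        steps.then_config_drive_is_as_expected("vm1", "10.1.1.100", metadata=True)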
 
     def handle_threads(self, source_threads, thread_class, **kwargs):
         my_threads = []
@@ -1197,13 +844,13 @@ def test_nuage_configDrive_concurrency(self):
 
         #
         #  1. When ConfigDrive enabled create network
-        default_state = self.update_provider_state("Enabled")
+        default_state = self.given_config_drive_provider_is("Enabled")
         create_network = self.verify_network_creation(
             offering_name="isolated_configdrive_network_offering_withoutdns",
             gateway='10.1.1.1')
         #
         # 2. Concurrently create all VMs
-        self.password_enabled = self.update_password_enable_in_template(False)
+        self.password_enabled = self.given_template_password_enabled_is(False)
         my_create_threads = []
         nbr_vms = 5
         for i in range(nbr_vms):
@@ -1246,21 +893,19 @@ def test_nuage_configDrive_concurrency(self):
             for aThread in my_update_threads:
                 #
                 # create floating ip
-                self.create_and_verify_fip_and_fw(aThread.get_vm(),
-                                                  public_ip_1,
-                                                  create_network.network)
+                self.when_I_create_a_static_nat_ip_to(aThread.get_vm(),
+                                                      create_network.network,
+                                                      public_ip_1)
                 #
                 # verify userdata
                 self.debug("[Concurrency]verify userdata for vm %s"
                            % aThread.get_vm().name)
-                self.verify_config_drive_content(
-                    aThread.get_vm(), public_ip_1,
-                    self.PasswordTest(None),
-                    userdata=aThread.get_userdata())
+                self.then_config_drive_is_as_expected(
+                    aThread.get_vm(), public_ip_1)
                 self.delete_StaticNatRule_For_VM(public_ip_1)
             #
             #  8. Concurrently reset password on all VM's
-            self.update_password_enable_in_template(True)
+            self.given_template_password_enabled_is(True)
             my_reset_threads = self.handle_threads(my_create_threads,
                                                    self.ResetPassword)
             #
@@ -1268,29 +913,28 @@ def test_nuage_configDrive_concurrency(self):
             self.debug("\n+++ [Concurrency]Verify passwords on all VM's")
             for aThread in my_reset_threads:
                 # create floating ip
-                self.create_and_verify_fip_and_fw(aThread.get_vm(),
-                                                  public_ip_1,
-                                                  create_network.network)
+                self.when_I_create_a_static_nat_ip_to(aThread.get_vm(),
+                                                      create_network.network,
+                                                      public_ip_1)
 
                 # verify password
                 self.debug("[Concurrency]verify password for vm %s"
                            % aThread.get_vm().name)
-                self.verify_config_drive_content(
-                    aThread.get_vm(), public_ip_1,
-                    self.PasswordTest(aThread.get_password()))
+                self.then_config_drive_is_as_expected(
+                    aThread.get_vm(), public_ip_1)
                 self.delete_StaticNatRule_For_VM(public_ip_1)
             public_ip_1.delete(self.api_client)
 
             self.debug("\n+++ [Concurrency]Stop all VM's")
 
         finally:
-            self.update_password_enable_in_template(self.password_enabled)
+            self.given_template_password_enabled_is(self.password_enabled)
             #
             # 11. Concurrently delete all VM's.
             self.handle_threads(my_create_threads, self.StopVM)
             #
             # 12. Restore ConfigDrive provider state
-            self.update_provider_state(default_state)
+            self.given_config_drive_provider_is(default_state)
             #
             # 13. Delete all the created objects (cleanup).
             self.delete_Network(create_network.network)
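The concurrency test in the hunk above fans the per-VM work out over small thread classes (StartVM, StopVM, ResetPassword, and a userdata-update worker) which handle_threads() starts and joins before the main thread verifies each VM. Those classes sit outside this hunk, so the sketch below only mirrors the general shape with hypothetical names; the real workers issue CloudStack API calls.

    import threading

    class UpdateUserdataWorkerSketch(threading.Thread):
        """Hypothetical stand-in for the suite's per-VM worker threads."""

        def __init__(self, vm, userdata):
            super(UpdateUserdataWorkerSketch, self).__init__()
            self.vm = vm
            self.userdata = userdata

        def run(self):
            # The real workers call CloudStack APIs here (update userdata,
            # reset password, stop VM, ...); the sketch just records intent.
            self.vm.setdefault("applied", []).append(self.userdata)

        def get_vm(self):
            return self.vm

        def get_userdata(self):
            return self.userdata

    def run_workers(vms, worker_class, **kwargs):
        # Start one worker per VM, wait for all of them, and hand the workers
        # back so the caller can verify every VM afterwards -- roughly the
        # shape of handle_threads() above, which chains from the previously
        # created workers instead of raw VM objects.
        workers = [worker_class(vm, **kwargs) for vm in vms]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        return workers

    if __name__ == "__main__":
        vms = [{"name": "vm%d" % i} for i in range(5)]
        done = run_workers(vms, UpdateUserdataWorkerSketch,
                           userdata="hello concurrency")
        print([(w.get_vm()["name"], w.get_userdata()) for w in done])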
@@ -1328,323 +972,204 @@ def test_nuage_configdrive_shared_network(self):
         # 9. Verify various scenarios and check the data in configdriveIso
         # 10. Delete all the created objects (cleanup).
 
-        for zone in self.zones:
-            self.debug("Zone - %s" % zone.name)
-            # Get Zone details
-            self.getZoneDetails(zone=zone)
-            # Configure VSD sessions
-            self.configureVSDSessions()
-            if not self.isNuageInfraUnderlay:
-                self.skipTest(
-                    "Configured Nuage VSP SDN platform infrastructure "
-                    "does not support underlay networking: "
-                    "skipping test")
-
-            self.debug("+++Testing configdrive in an shared network fails..."
-                       "as provider configdrive is still disabled...")
-            self.update_provider_state("Disabled")
-            shared_test_data = self.test_data["nuagevsp"]["network_all"]
-            shared_network = self.verify_network_creation(
-                offering_name="shared_nuage_network_config_drive_offering",
-                testdata=shared_test_data)
-            self.assertFalse(shared_network.success,
-                             'Network found success = %s, expected success =%s'
-                             % (str(shared_network.success), 'False'))
-
-            self.update_provider_state("Enabled")
-            shared_network = self.verify_network_creation(
-                offering=shared_network.offering, testdata=shared_test_data)
-            self.assertTrue(shared_network.success,
-                            'Network found success = %s, expected success = %s'
-                            % (str(shared_network.success), 'True'))
-
-            self.validate_Network(shared_network.network, state="Allocated")
-
-            shared_test_data2 = self.test_data["nuagevsp"]["network_all2"]
-            shared_network2 = self.verify_network_creation(
-                offering=shared_network.offering,
-                testdata=shared_test_data2)
-            self.assertTrue(shared_network2.success,
-                            'Network found success = %s, expected success = %s'
-                            % (str(shared_network2.success), 'True'))
-
-            self.validate_Network(shared_network2.network, state="Allocated")
-
-            self.debug("+++Test user data & password reset functionality "
-                       "using configdrive in an Isolated network without VR")
-
-            self.update_password_enable_in_template(True)
-            public_ip_ranges = PublicIpRange.list(self.api_client)
-            for ip_range in public_ip_ranges:
-                if shared_network.network.id == ip_range.networkid \
-                        or shared_network2.network.id == ip_range.networkid:
-                    self.enable_NuageUnderlayPublicIpRange(ip_range.id)
-
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-
-            self.debug("+++Deploy of a VM on a shared network with multiple "
-                       "ip ranges, all should have the same value for the "
-                       "underlay flag.")
-            # Add subnet of different gateway
-            self.debug("+++ Adding subnet of different gateway")
-
-            subnet = self.add_subnet_verify(
-                shared_network.network,
-                self.test_data["nuagevsp"]["publiciprange2"])
-            tmp_test_data = copy.deepcopy(
-                self.test_data["virtual_machine"])
-
-            tmp_test_data["ipaddress"] = \
-                self.test_data["nuagevsp"]["network_all"]["endip"]
+        if not self.isNuageInfraUnderlay:
+            self.skipTest(
+                "Configured Nuage VSP SDN platform infrastructure "
+                "does not support underlay networking: "
+                "skipping test")
 
-            with self.assertRaises(Exception):
-                self.create_VM(
-                    [shared_network.network],
-                    testdata=tmp_test_data)
-
-            self.debug("+++ In a shared network with multiple ip ranges, "
-                       "userdata with config drive must be allowed.")
-
-            self.enable_NuageUnderlayPublicIpRange(subnet.vlan.id)
-
-            vm1 = self.create_VM(
-                [shared_network.network],
-                testdata=self.test_data["virtual_machine_userdata"],
-                keypair=self.keypair.name)
-            # Check VM
-            self.check_VM_state(vm1, state="Running")
-            # Verify shared Network and VM in VSD
-            self.verify_vsd_shared_network(
-                self.domain.id,
-                shared_network.network,
-                gateway=self.test_data["nuagevsp"]["network_all"]["gateway"])
-            subnet_id = self.get_subnet_id(
-                shared_network.network.id,
-                self.test_data["nuagevsp"]["network_all"]["gateway"])
-            self.verify_vsd_enterprise_vm(
-                self.domain.id,
-                shared_network.network, vm1,
-                sharedsubnetid=subnet_id)
+        self.debug("+++Testing configdrive in an shared network fails..."
+                   "as provider configdrive is still disabled...")
+        self.given_config_drive_provider_is("Disabled")
+        shared_test_data = self.test_data["nuagevsp"]["network_all"]
+        shared_network = self.verify_network_creation(
+            offering_name="shared_nuage_network_config_drive_offering",
+            testdata=shared_test_data)
+        self.assertFalse(shared_network.success,
+                         'Network found success = %s, expected success = %s'
+                         % (str(shared_network.success), 'False'))
 
-            with self.assertRaises(Exception):
-                self.get_Router(shared_network)
-            self.debug("+++ Verified no VR is spawned for this network ")
-            # We need to have the vm password
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-            public_ip = PublicIPAddress({"ipaddress": vm1})
-            self.verify_config_drive_content(
-                vm1, public_ip,
-                self.PasswordTest(vm1.password),
-                metadata=True,
-                userdata=self.test_data["virtual_machine_userdata"][
-                    "userdata"])
-            expected_user_data = self.update_userdata(vm1, "helloworld vm1")
-            self.verify_config_drive_content(
-                vm1, public_ip, self.PasswordTest(vm1.password),
-                userdata=expected_user_data)
-
-            self.debug("+++ Adding a non-default nic to the VM "
-                       "making it a multi-nic VM...")
-            self.nic_operation_VM(vm1, shared_network2.network,
-                                  operation="add")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password),
-                                             metadata=True,
-                                             userdata=expected_user_data,
-                                             ssh_key=self.keypair)
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hellomultinicvm1")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ Updating non-default nic as the default nic "
-                       "of the multi-nic VM...")
-            self.nic_operation_VM(vm1,
-                                  shared_network2.network, operation="update")
-            vm1.stop(self.api_client)
-            vm1.start(self.api_client)
-
-            public_ip_2 = PublicIPAddress(
-                {"ipaddress": VirtualMachine.list(self.api_client,
-                                                  id=vm1.id)[0].nic[1]})
-            self.verify_config_drive_content(vm1, public_ip_2,
-                                             self.PasswordTest(False),
-                                             metadata=True,
-                                             userdata=expected_user_data1)
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-            self.verify_config_drive_content(vm1, public_ip_2,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hellomultinicvm1")
-            self.verify_config_drive_content(vm1, public_ip_2,
-                                             self.PasswordTest(True),
-                                             userdata=expected_user_data1)
-
-            self.debug("+++ Updating the default nic of the multi-nic VM, "
-                       "deleting the non-default nic...")
-            self.nic_operation_VM(vm1,
-                                  shared_network.network, operation="update")
-            vm1.stop(self.api_client)
-            vm1.start(self.api_client)
-            public_ip = PublicIPAddress({"ipaddress": vm1})
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(False),
-                                             metadata=True,
-                                             userdata=expected_user_data1)
-
-            self.nic_operation_VM(vm1,
-                                  shared_network2.network, operation="remove")
-
-            multinicvm1 = self.create_VM([shared_network2.network,
-                                          shared_network.network])
-            multinicvm1.password = multinicvm1.resetPassword(self.api_client)
-            self.debug("+++ MultiNICVM Password reset to - %s"
-                       % multinicvm1.password)
-            self.debug("MultiNICVM - %s password - %s !"
-                       % (multinicvm1.name, multinicvm1.password))
-            public_ip_3 = \
-                PublicIPAddress(
-                    {"ipaddress": VirtualMachine.list(
-                        self.api_client, id=multinicvm1.id)[0].nic[0]})
-            self.verify_config_drive_content(
-                multinicvm1, public_ip_3,
-                self.PasswordTest(multinicvm1.password),
-                metadata=True)
-            expected_user_data2 = self.update_userdata(multinicvm1,
-                                                       "hello multinicvm1")
-            self.verify_config_drive_content(multinicvm1, public_ip_3,
-                                             self.PasswordTest(True),
-                                             userdata=expected_user_data2)
-            multinicvm1.delete(self.api_client, expunge=True)
-
-            shared_network2.network.delete(self.api_client)
-            # We need to have the vm password
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
-            public_ip = PublicIPAddress({"ipaddress": vm1})
-
-            self.debug("+++ Verifying userdata after rebootVM - %s" % vm1.name)
-            vm1.reboot(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password),
-                                             metadata=True,
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata for VM - %s" % vm1.name)
-            expected_user_data1 = self.update_userdata(vm1, "hello afterboot")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             ssh_key=self.keypair)
-            self.debug("Resetting password for VM - %s" % vm1.name)
-            self.reset_password(vm1)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password))
-
-            self.debug("+++ Migrating one of the VMs in the created Isolated "
-                       "network to another host, if available...")
-            self.migrate_VM(vm1)
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata after migrating VM - %s" % vm1.name)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hello after migrate")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password),
-                                             userdata=expected_user_data1)
-            self.debug("Resetting password for VM - %s" % vm1.name)
-            self.reset_password(vm1)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password))
-
-            self.debug("+++ Verify userdata after stopstartVM - %s" % vm1.name)
-            vm1.stop(self.api_client)
-            vm1.start(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("Updating userdata for VM - %s" % vm1.name)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hello afterstopstart")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1)
-            self.debug("Resetting password for VM - %s" % vm1.name)
-            self.reset_password(vm1)
-            self.debug("SSHing into the VM for verifying its new password "
-                       "after its password reset...")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(vm1.password))
-
-            self.debug("+++ Verify userdata after VM recover- %s" % vm1.name)
-            vm1.delete(self.api_client, expunge=False)
-            self.debug("Recover VM - %s" % vm1.name)
-            vm1.recover(self.api_client)
-            vm1.start(self.api_client)
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-            self.update_provider_state("Disabled")
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "hello after recover")
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-
-            self.debug("+++ When template is not password enabled, "
-                       "verify configdrive of VM - %s" % vm1.name)
-            vm1.delete(self.api_client, expunge=True)
-            self.update_provider_state("Enabled")
-            self.updateTemplate(False)
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-            vm1 = self.create_VM(
-                [shared_network.network],
-                testdata=self.test_data["virtual_machine_userdata"],
-                keypair=self.keypair.name)
-            expected_user_data1 = self.update_userdata(vm1,
-                                                       "This is sample data")
-            public_ip = PublicIPAddress({"ipaddress": vm1})
-            self.verify_config_drive_content(vm1, public_ip,
-                                             self.PasswordTest(False),
-                                             userdata=expected_user_data1,
-                                             metadata=True,
-                                             ssh_key=self.keypair)
-            vm1.delete(self.api_client, expunge=True)
-            shared_network.network.delete(self.api_client)
+        self.given_config_drive_provider_is("Enabled")
+        shared_network = self.verify_network_creation(
+            offering=shared_network.offering, testdata=shared_test_data)
+        self.assertTrue(shared_network.success,
+                        'Network found success = %s, expected success = %s'
+                        % (str(shared_network.success), 'True'))
+
+        shared_nw_1 = shared_network.network
+
+        self.validate_Network(shared_nw_1, state="Allocated")
+
+        shared_test_data2 = self.test_data["nuagevsp"]["network_all2"]
+        shared_network2 = self.verify_network_creation(
+            offering=shared_network.offering,
+            testdata=shared_test_data2)
+        self.assertTrue(shared_network2.success,
+                        'Network found success = %s, expected success = %s'
+                        % (str(shared_network2.success), 'True'))
+
+        shared_nw_1 = shared_network.network
+        shared_nw_2 = shared_network2.network
+        shared_nw_ids = [shared_nw_1.id, shared_nw_2.id]
+        self.validate_Network(shared_nw_2, state="Allocated")
+
+        self.debug("+++Test user data & password reset functionality "
+                   "using configdrive in an Isolated network without VR")
+
+        self.given_template_password_enabled_is(True)
+        public_ip_ranges = PublicIpRange.list(self.api_client)
+        for ip_range in public_ip_ranges:
+            if ip_range.networkid in shared_nw_ids:
+                self.enable_NuageUnderlayPublicIpRange(ip_range.id)
+
+        self.debug("+++Deploy of a VM on a shared network with multiple "
+                   "ip ranges, all should have the same value for the "
+                   "underlay flag.")
+        # Add subnet of different gateway
+        self.debug("+++ Adding subnet of different gateway")
+
+        subnet = self.add_subnet_to_shared_network_and_verify(
+            shared_nw_1,
+            self.test_data["nuagevsp"]["publiciprange2"])
+        tmp_test_data = copy.deepcopy(
+            self.test_data["virtual_machine"])
+
+        tmp_test_data["ipaddress"] = \
+            self.test_data["nuagevsp"]["network_all"]["endip"]
+
+        with self.assertRaises(Exception):
+            self.create_VM([shared_nw_1], testdata=tmp_test_data)
+
+        self.debug("+++ In a shared network with multiple ip ranges, "
+                   "userdata with config drive must be allowed.")
+
+        self.enable_NuageUnderlayPublicIpRange(subnet.vlan.id)
+
+        vm1 = self.when_I_deploy_a_vm_with_keypair_in([shared_nw_1])
+
+        self.then_vr_is_as_expected(shared_nw_1)
+
+        public_ip = self.get_public_shared_ip(vm1, shared_nw_1)
+        self.then_config_drive_is_as_expected(vm1, public_ip, metadata=True)
+
+        self.update_and_validate_userdata(vm1, "helloworld vm1", public_ip)
+        self.then_config_drive_is_as_expected(vm1, public_ip)
+
+        # =====================================================================
+        # Test using network2 as default network
+        # =====================================================================
+
+        self.debug("+++ Adding a non-default nic to the VM "
+                   "making it a multi-nic VM...")
+        self.plug_nic(vm1, shared_nw_2)
+        self.then_config_drive_is_as_expected(vm1, public_ip, metadata=True)
+
+        self.when_I_reset_the_password(vm1)
+        self.update_and_validate_userdata(vm1, "hellomultinicvm1", public_ip)
+
+        self.debug("+++ Updating non-default nic as the default nic "
+                   "of the multi-nic VM...")
+        self.update_default_nic(vm1, shared_nw_2)
+        self.stop_and_start_vm(vm1)
+
+        public_ip_2 = self.get_public_shared_ip(vm1, shared_nw_2)
+        self.then_config_drive_is_as_expected(vm1, public_ip_2, metadata=True)
+
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip_2)
+
+        self.update_and_validate_userdata(vm1, "hellomultinicvm1again",
+                                          public_ip_2)
+
+        self.debug("+++ Updating the default nic of the multi-nic VM, "
+                   "deleting the non-default nic...")
+        self.update_default_nic(vm1, shared_nw_1)
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip, metadata=True)
+
+        self.unplug_nic(vm1, shared_nw_2)
+
+        # =====================================================================
+        # Another Multinic VM
+        # =====================================================================
+
+        multinicvm1 = self.when_I_deploy_a_vm([shared_nw_2, shared_nw_1])
+        public_ip_3 = self.get_public_shared_ip(multinicvm1, shared_nw_2)
+        self.then_config_drive_is_as_expected(
+            multinicvm1, public_ip_3,
+            metadata=True)
+        self.update_and_validate_userdata(multinicvm1, "hello multinicvm1",
+                                          public_ip_3)
+        self.then_config_drive_is_as_expected(multinicvm1, public_ip_3)
+        multinicvm1.delete(self.api_client, expunge=True)
+        shared_nw_2.delete(self.api_client)
+
+        # =====================================================================
+
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after reboot")
+        vm1.reboot(self.api_client)
+        self.then_config_drive_is_as_expected(vm1, public_ip, metadata=True)
+        self.update_and_validate_userdata(vm1, "hello afterboot", public_ip)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after migrate")
+        self.migrate_VM(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip, metadata=True)
+        self.update_and_validate_userdata(vm1, "hello after migrate",
+                                          public_ip)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "update userdata and reset password after stop/start")
+        self.stop_and_start_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip, metadata=True)
+
+        self.update_and_validate_userdata(vm1, "hello afterstopstart",
+                                          public_ip)
+        self.when_I_reset_the_password(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "verify config drive after delete/recover")
+        self.delete_and_recover_vm(vm1)
+        self.then_config_drive_is_as_expected(vm1, public_ip,
+                                              metadata=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Start VM fails when ConfigDrive provider is disabled")
+        self.given_config_drive_provider_is("Disabled")
+        with self.assertRaises(Exception):
+            self.when_I_update_userdata(vm1, "hi with provider state Disabled")
+        self.given_config_drive_provider_is("Enabled")
+
+        self.delete(vm1, expunge=True)
+
+        # =====================================================================
+        self.debug("+++ Scenario: "
+                   "Update Userdata on a VM that is not password enabled")
+        self.update_template(passwordenabled=False)
+
+        vm1 = self.create_VM(
+            [shared_nw_1],
+            testdata=self.test_data["virtual_machine_userdata"],
+            keypair=self.keypair.name)
+
+        self.update_and_validate_userdata(vm1, "This is sample data",
+                                          public_ip)
+        public_ip = PublicIPAddress({"ipaddress": vm1})
+        self.then_config_drive_is_as_expected(vm1, public_ip,
+                                              metadata=True)
+
+        self.delete(vm1, expunge=True)
+        self.delete(shared_nw_1)
 
     @attr(tags=["advanced", "nuagevsp", "endurance"], required_hardware="true")
     def test_nuage_configdrive_endurance(self):
@@ -1661,61 +1186,28 @@ def test_nuage_configdrive_endurance(self):
         # 5. Wait util all updates are finished
         # 6. Check userdata in VM
         # 7. Delete all the created objects (cleanup).
-        for zone in self.zones:
-            self.debug("Zone - %s" % zone.name)
-            # Get Zone details
-            self.getZoneDetails(zone=zone)
-            # Configure VSD sessions
-            self.configureVSDSessions()
-            self.update_provider_state("Enabled")
-            create_network = self.verify_network_creation(
-                offering_name="isolated_configdrive_network_offering_"
-                              "withoutdns",
-                gateway='10.1.1.1')
-            self.assertTrue(create_network.success,
-                            'Network found success = %s, expected success = %s'
-                            % (str(create_network.success), 'True'))
-
-            self.validate_Network(create_network.network, state="Allocated")
-            self.update_password_enable_in_template(True)
-            self.generate_ssh_keys()
-            self.debug("keypair name %s " % self.keypair.name)
-            vm1 = self.create_guest_vm(create_network.network,
-                                       keypair=self.keypair.name)
 
-            with self.assertRaises(Exception):
-                self.get_Router(create_network)
-            self.debug("+++Verified no VR is spawned for this network ")
-            # We need to have the vm password
-            vm1.password = vm1.resetPassword(self.api_client)
-            self.debug("Password reset to - %s" % vm1.password)
-            self.debug("VM - %s password - %s !" %
-                       (vm1.name, vm1.password))
+        self.given_config_drive_provider_is("Enabled")
+        self.given_template_password_enabled_is(True)
+        self.given_a_network_offering_with_configdrive()
+        create_network = self.when_I_create_a_network_with_that_offering(
+            gateway='10.5.1.1'
+        )
+        self.then_the_network_is_successfully_created(create_network)
+        self.then_the_network_has(create_network, state="Allocated")
 
-            public_ip_1 = self.acquire_PublicIPAddress(create_network.network)
-            self.create_and_verify_fip_and_fw(vm1, public_ip_1,
-                                              create_network.network)
-
-            expected_user_data = self.test_data[
-                "virtual_machine_userdata"]["userdata"]
-            ssh_client = self.verify_config_drive_content(
-                vm1, public_ip_1,
-                self.PasswordTest(vm1.password),
-                metadata=True,
-                userdata=expected_user_data,
-                ssh_key=self.keypair)
-
-            for i in range(0, 300):
-                self.verify_config_drive_content(
-                    vm1, public_ip_1,
-                    self.PasswordTest(vm1.password),
-                    metadata=True,
-                    userdata=expected_user_data,
-                    ssh_key=self.keypair,
-                    ssh_client=ssh_client)
-                expected_user_data = \
-                    self.update_userdata(vm1,
-                                         'This is sample data %s' % i)
+        network = create_network.network
+
+        vm = self.when_I_deploy_a_vm(network, keypair=self.keypair.name)
+        self.then_vr_is_as_expected(network=network)
+
+        public_ip = \
+            self.when_I_create_a_static_nat_ip_to(vm, create_network.network)
+        self.then_config_drive_is_as_expected(vm, public_ip, metadata=True)
+
+        for i in range(0, 30):
+            self.update_and_validate_userdata(vm, 'This is sample data %s' % i,
+                                              public_ip)
 
 if __name__ == "__main__" and __package__ is None:
     __package__ = "integration.plugins.nuage"
diff --git a/test/integration/plugins/nuagevsp/test_nuage_extra_dhcp.py b/test/integration/plugins/nuagevsp/test_nuage_extra_dhcp.py
index e167ddef122..3b58cbd017b 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_extra_dhcp.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_extra_dhcp.py
@@ -19,11 +19,11 @@
 Nuage VSP SDN plugin
 """
 # Import Local Modules
-from nuageTestCase import (nuageTestCase, gherkin)
+from nuageTestCase import (nuageTestCase)
+from nuage_lib import gherkin
 from marvin.cloudstackAPI import updateVirtualMachine, updateZone
 from marvin.lib.base import (Account,
                              Network,
-                             VirtualMachine,
                              Configurations,
                              NetworkOffering)
 # Import System Modules
@@ -56,7 +56,7 @@ def setUpClass(cls, zone=None):
         cls.api_client.updateZone(cmd)
         cls.vpc_offering = cls.create_VpcOffering(
             cls.test_data["nuagevsp"]["vpc_offering_nuage_dhcp"])
-        cls.vpc1 = cls.create_Vpc(cls.vpc_offering, cidr="10.0.0.0/16",
+        cls.vpc1 = cls.create_vpc(cls.vpc_offering, cidr="10.0.0.0/16",
                                   networkDomain="testvpc.com")
 
         cls.vpc_network_offering = cls.create_NetworkOffering(
@@ -423,7 +423,8 @@ def verify_vsd_dhcp_type_notpresent(self, dhcp_types, vm_interface):
     def verify_dhcp_on_vm(
             self, dhcpleasefile, dhcp_option_map, ssh_client, cleanlease=True):
         if self.isSimulator:
-            self.debug("Simulator Environment: Skipping VM DHCP option verification")
+            self.debug("Simulator Environment: "
+                       "Skipping VM DHCP option verification")
             return
 
         cmd = 'cat /var/lib/dhclient/'+dhcpleasefile
@@ -455,8 +456,7 @@ def remove_lease_file(self, ssh_client, dhcpleasefile):
         self.debug("clear lease is done properly:" + completeoutput)
 
     def update_zone_details(self, value):
-        """Updates the VM data"""
-        # update Network Domain at zone level
+        """Updates Network Domain at zone level"""
         cmd = updateZone.updateZoneCmd()
         cmd.id = self.zone.id
         cmd.domain = value
@@ -489,9 +489,9 @@ def validate_isolated_network(
         self.validate_NetworkOffering(network_offering, state="Enabled")
         self.validate_Network(network)
 
-    def validate_vpc(self, vpc, vpc_offering):
+    def validate_vpc_and_vpcoffering(self, vpc, vpc_offering):
         self.debug("Validating vpc...")
-        self.validate_Vpc(vpc)
+        self.validate_vpc(vpc)
         self.validate_VpcOffering(vpc_offering)
 
     def verify_dhcp_options_on_vm(
@@ -590,18 +590,18 @@ def create_vpc_network_offering(self):
         self.validate_NetworkOffering(network_offering, state="Enabled")
         return network_offering
 
-    def create_vpc(self, vpc_offering, cidr="10.0.0.0/16"):
+    def create_and_validate_vpc(self, vpc_offering, cidr="10.0.0.0/16"):
         # Creating a VPC
         self.debug("Creating a VPC with Nuage VSP VPC offering...")
-        vpc = self.create_Vpc(vpc_offering, cidr=cidr,
+        vpc = self.create_vpc(vpc_offering, cidr=cidr,
                               networkDomain="testvpc.com")
-        self.validate_Vpc(vpc, state="Enabled")
+        self.validate_vpc(vpc, state="Enabled")
 
         return vpc
 
     def create_vpc_with_tier(self, domain_name="testvpc.com"):
         vpc_offering = self.create_vpc_offering_with_nuage_dhcp()
-        vpc = self.create_vpc(vpc_offering)
+        vpc = self.create_and_validate_vpc(vpc_offering)
 
         vpc_network_offering = self.create_vpc_network_offering()
         acl_list = self.create_acl_list_with_item(vpc)
@@ -1267,8 +1267,9 @@ def validate_all_extra_dhcp_for_remove_nic_from_vm(
         self.when_i_stop_and_start_a_vm(vm1)
         with self.assertRaises(Exception):
             vm1.remove_nic(
-                self.api_client, [nic for nic in result.nic
-                                  if nic.networkid == network.id][0])
+                self.api_client,
+                [nic for nic in result.nic
+                 if nic.networkid == network.id][0].id)
 
     def validate_all_extra_dhcp_for_update_multinic(
             self, network,
@@ -1683,7 +1684,7 @@ def test_01_nuage_extra_dhcp_single_nic_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_02_nuage_extra_dhcp_single_nic_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_deploy_vm(
@@ -1710,7 +1711,7 @@ def test_04_nuage_extra_dhcp_update_vm_in_isoltated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_05_nuage_extra_dhcp_update_vm_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_after_vm_update(
@@ -1736,7 +1737,7 @@ def test_07_nuage_extra_dhcp_add_nic_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_08_nuage_extra_dhcp_add_nic_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_after_plug_nic(
@@ -1762,7 +1763,7 @@ def test_10_nuage_extra_dhcp_deploy_multi_nic_vm_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_11_nuage_extra_dhcp_deploy_multi_nic_vm_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_multi_nic(
@@ -1788,7 +1789,7 @@ def test_13_nuage_extra_dhcp_update_multi_nic_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_14_nuage_extra_dhcp_update_multi_nic_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_update_multinic(
@@ -1814,7 +1815,7 @@ def test_16_nuage_extra_dhcp_remove_nic_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_17_nuage_extra_dhcp_remove_nic_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_remove_nic_from_vm(
@@ -1841,7 +1842,7 @@ def test_19_nuage_extra_dhcp_vm_actions_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_20_nuage_nuage_extra_dhcp_vm_actions_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_vm_actions_in_network(
@@ -1868,7 +1869,7 @@ def test_22_nuage_extra_dhcp_network_actions_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_23_nuage_nuage_extra_dhcp_network_actions_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_network_actions_in_network(
@@ -1895,7 +1896,7 @@ def test_25_nuage_extra_dhcp_nic_after_migrate_in_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_26_nuage_nuage_extra_dhcp_nic_after_migrate_in_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_add_remove_nic_after_migrate(
@@ -1929,7 +1930,7 @@ def test_29_nuage_extra_dhcp_allocated_isolated_network(self):
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_30_nuage_extra_dhcp_allocated_vpc(self):
         self.update_zone_details("testvpc.com")
-        self.validate_vpc(self.vpc1, self.vpc_offering)
+        self.validate_vpc_and_vpcoffering(self.vpc1, self.vpc_offering)
         self.validate_Network(self.vpc_network)
 
         self.validate_all_extra_dhcp_for_network_in_allocated(
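Most of the test_nuage_extra_dhcp.py changes above (and several in the next file) are mechanical renames: the base-class helpers were apparently renamed to lowercase (create_Vpc to create_vpc, validate_Vpc to validate_vpc), so the subclass's own create_vpc/validate_vpc helpers move aside as create_and_validate_vpc and validate_vpc_and_vpcoffering to stop shadowing them. A toy sketch of the shadowing hazard the rename avoids, with hypothetical class names:

    class BaseNuageTestSketch(object):
        # Stand-in for the renamed base-class helper: create only.
        def create_vpc(self, offering, cidr="10.0.0.0/16"):
            return {"offering": offering, "cidr": cidr}

    class ExtraDhcpTestSketch(BaseNuageTestSketch):
        # Before the rename this class defined its own create_vpc(), silently
        # shadowing the base helper for every caller of the subclass.
        def create_and_validate_vpc(self, offering, cidr="10.0.0.0/16"):
            vpc = self.create_vpc(offering, cidr=cidr)  # base helper, unambiguous
            assert vpc["cidr"] == cidr                  # stand-in for validate_vpc()
            return vpc

    if __name__ == "__main__":
        print(ExtraDhcpTestSketch().create_and_validate_vpc("vpc_offering_nuage_dhcp"))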
diff --git a/test/integration/plugins/nuagevsp/test_nuage_internal_dns.py b/test/integration/plugins/nuagevsp/test_nuage_internal_dns.py
index 09bcdc9800b..5b8130231f1 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_internal_dns.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_internal_dns.py
@@ -23,7 +23,6 @@
 from marvin.lib.base import Account, Network
 # Import System Modules
 from nose.plugins.attrib import attr
-import time
 
 UPDATED_DOMAIN_NAME = "update.com"
 
@@ -124,7 +123,8 @@ def vm_verify_ping(self, src_vm, public_ip, dst_vm, domain_name):
         self.debug("command is executed properly " + cmd)
         completeoutput = str(outputlist).strip('[]')
         self.debug("complete output is " + completeoutput)
-        expectedlist = ['2 received', dst_vm.name + '.' + domain_name, dst_vm.ipaddress]
+        expectedlist = ['2 received', dst_vm.name + '.' + domain_name,
+                        dst_vm.ipaddress]
         for item in expectedlist:
             if item in completeoutput:
                 self.debug("excepted value found in vm: " + item)
@@ -163,7 +163,8 @@ def test_01_Isolated_Network_with_zone(self):
 
         # Internal DNS check point on VSD
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
             self.verify_vsd_dhcp_option(
@@ -204,7 +205,8 @@ def test_02_Isolated_Network(self):
 
         # Internal DNS check point on VSD
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
             self.verify_vsd_dhcp_option(
@@ -264,7 +266,8 @@ def test_03_Isolated_Network_restarts(self):
 
         # Internal DNS check point on VSD
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, ISOLATED_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
             self.verify_vsd_dhcp_option(
@@ -373,7 +376,8 @@ def test_04_Update_Network_with_Domain(self):
                          "Network Domain is not updated as expected"
                          )
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, UPDATED_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, UPDATED_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
             self.verify_vsd_dhcp_option(
@@ -432,7 +436,8 @@ def test_05_Update_Network_with_Domain(self):
                          "Network Domain is not updated as expected"
                          )
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, UPDATED_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, UPDATED_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
             self.verify_vsd_dhcp_option(
@@ -484,7 +489,7 @@ def test_06_VPC_Network_With_InternalDns(self):
         vpc_off = self.create_VpcOffering(self.dnsdata["vpc_offering"])
         self.validate_VpcOffering(vpc_off, state="Enabled")
 
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)
 
         self.debug("Creating Nuage Vsp VPC Network offering...")
         network_offering = self.create_NetworkOffering(
@@ -501,10 +506,12 @@ def test_06_VPC_Network_With_InternalDns(self):
 
         # Internal DNS check point on VSD
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
-            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
+            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                        nic, True)
             self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
 
     @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
@@ -527,7 +534,7 @@ def test_07_VPC_Network_With_InternalDns(self):
 
         vpc_off = self.create_VpcOffering(self.dnsdata["vpc_offering"])
         self.validate_VpcOffering(vpc_off, state="Enabled")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)
 
         self.debug("Creating Nuage Vsp VPC Network offering...")
         network_offering = self.create_NetworkOffering(
@@ -546,7 +553,8 @@ def test_07_VPC_Network_With_InternalDns(self):
         self.verify_vsd_dhcp_option(self.DOMAINNAME, "vpc.com", network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
-            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
+            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                        nic, True)
             self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
 
         self.test_data["virtual_machine"]["displayname"] = "vm2"
@@ -557,7 +565,8 @@ def test_07_VPC_Network_With_InternalDns(self):
         self.verify_vsd_vm(vm_2)
         for nic in vm_2.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
-            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
+            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                        nic, True)
             self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
 
         public_ip_1 = self.acquire_PublicIPAddress(network_1, vpc)
@@ -594,7 +603,7 @@ def test_08_VPC_Network_Restarts_With_InternalDns(self):
 
         vpc_off = self.create_VpcOffering(self.dnsdata["vpc_offering"])
         self.validate_VpcOffering(vpc_off, state="Enabled")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)
 
         self.debug("Creating Nuage Vsp VPC Network offering...")
         network_offering = self.create_NetworkOffering(
@@ -610,10 +619,12 @@ def test_08_VPC_Network_Restarts_With_InternalDns(self):
         self.verify_vsd_vm(vm_1)
         # Internal DNS check point on VSD
         self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", network_1)
-        self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, network_1)
+        self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                    network_1)
         for nic in vm_1.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
-            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
+            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                        nic, True)
             self.verify_vsd_dhcp_option(self.HOSTNAME, "vm1", nic, True)
 
         self.test_data["virtual_machine"]["displayname"] = "vm2"
@@ -624,7 +635,8 @@ def test_08_VPC_Network_Restarts_With_InternalDns(self):
         self.verify_vsd_vm(vm_2)
         for nic in vm_2.nic:
             self.verify_vsd_dhcp_option(self.DNS, "10.1.1.2", nic, True)
-            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)
+            self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME,
+                                        nic, True)
             self.verify_vsd_dhcp_option(self.HOSTNAME, "vm2", nic, True)
 
         public_ip_1 = self.acquire_PublicIPAddress(network_1, vpc)
diff --git a/test/integration/plugins/nuagevsp/test_nuage_network_migration.py b/test/integration/plugins/nuagevsp/test_nuage_network_migration.py
index c79e758bc0b..d051712dc01 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_network_migration.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_network_migration.py
@@ -1154,7 +1154,7 @@ def test_09_migrate_native_persist_staticnat_to_nuage_traffic(self):
     @attr(tags=["migrateACS", "vpcnovms"],
           required_hardware="false")
     def test_10_migrate_native_vpc(self):
-        vpc = self.create_Vpc(self.native_vpc_offering)
+        vpc = self.create_vpc(self.native_vpc_offering)
         network = self.create_Network(self.native_vpc_network_offering,
                                       vpc=vpc)
         self.create_VM(network)
@@ -1222,8 +1222,8 @@ def test_11_migrate_native_vpc_staticnat_to_nuage_traffic(self):
 
         self.debug("Creating a VPC with Static NAT service provider as "
                    "VpcVirtualRouter")
-        vpc = self.create_Vpc(native_vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(native_vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         self.debug("Creating native VPC Network Tier offering "
                    "with Static NAT service provider as VPCVR")
@@ -1484,8 +1484,8 @@ def test_12_migrate_native_vpc_multinic_to_nuage_traffic(self):
 
         self.debug("Creating a VPC with Static NAT service provider as "
                    "VpcVirtualRouter")
-        vpc = self.create_Vpc(native_vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(native_vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         self.debug("Creating native VPC Network Tier offering "
                    "with Static NAT service provider as VPCVR")
@@ -1802,8 +1802,8 @@ def test_13_verify_guestvmip2_when_migrating_to_nuage(self):
 
         self.debug("Creating a VPC with Static NAT service provider as "
                    "VpcVirtualRouter")
-        vpc = self.create_Vpc(native_vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(native_vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         self.debug("Creating native VPC Network Tier offering "
                    "with Static NAT service provider as VPCVR")
@@ -1922,8 +1922,8 @@ def test_15_native_to_native_vpc_migration(self):
 
         self.debug("Creating a VPC with Static NAT service provider as "
                    "VpcVirtualRouter")
-        vpc = self.create_Vpc(native_vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(native_vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         self.debug("Creating native VPC Network Tier offering "
                    "with Static NAT service provider as VPCVR")
diff --git a/test/integration/plugins/nuagevsp/test_nuage_password_reset.py b/test/integration/plugins/nuagevsp/test_nuage_password_reset.py
index 28714a7d354..f6f5f8b89a3 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_password_reset.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_password_reset.py
@@ -120,7 +120,7 @@ def create_and_verify_fw(self, vm, public_ip, network):
 
     # stop_vm - Stops the given VM, and verifies its state
     def stop_vm(self, vm):
-        self.debug("Stoping VM")
+        self.debug("Stopping VM")
         vm.stop(self.api_client)
         list_vm_response = VirtualMachine.list(self.api_client,
                                                id=vm.id
@@ -139,8 +139,8 @@ def stop_vm(self, vm):
     # (SSH client)
     def install_cloud_set_guest_password_script(self, ssh_client):
         if self.isSimulator:
-            self.debug( "Simulator Environment: Skipping installing"
-                        " cloud-set-guest-password script")
+            self.debug("Simulator Environment: Skipping installing"
+                       " cloud-set-guest-password script")
             return
         self.debug("Installing cloud-set-guest-password script")
         cmd = "cd /etc/init.d;wget http://people.apache.org/~tsp/" \
@@ -268,8 +268,7 @@ def test_nuage_UserDataPasswordReset(self):
             self.debug("Actual user data - " + actual_user_data +
                        ", Expected user data - " + expected_user_data)
             self.assertEqual(actual_user_data, expected_user_data,
-                             "Un-expected VM (VM_1) user data"
-                         )
+                             "Un-expected VM (VM_1) user data")
 
             self.debug("Checking for cloud-set-guest-password script in the "
                        "VM for testing password reset functionality...")
@@ -330,12 +329,22 @@ def test_nuage_UserDataPasswordReset(self):
                 vm_test_public_ip = public_ip_1
 
             self.debug("Resetting password for VM - %s" % vm_test.name)
+            self.stop_vm(vm_test)
             vm_test.password = vm_test.resetPassword(self.api_client)
             self.debug("Password reset to - %s" % vm_test.password)
 
             self.debug("Starting the VM")
             vm_test.start(self.api_client)
 
+            self.debug("until CLOUDSTACK-10380 is fixed, redo resetPassword")
+            self.stop_vm(vm_test)
+            self.debug("Resetting password again for VM - %s" % vm_test.name)
+            vm_test.password = vm_test.resetPassword(self.api_client)
+            self.debug("VM - %s password - %s !" %
+                       (vm_test.name, vm_test.password))
+            self.debug("Starting the VM again")
+            vm_test.start(self.api_client)
+
             self.debug("verifying that the guest VM template is password "
                        "enabled...")
             self.debug("VM - %s password - %s !" %
diff --git a/test/integration/plugins/nuagevsp/test_nuage_source_nat.py b/test/integration/plugins/nuagevsp/test_nuage_source_nat.py
index 6b53d2d533d..b31d5ecaafe 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_source_nat.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_source_nat.py
@@ -460,19 +460,19 @@ def test_02_nuage_SourceNAT_vpc_networks(self):
         # Creating VPCs
         self.debug("Creating a VPC with Source NAT service provider as "
                    "NuageVsp...")
-        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_1, state="Enabled")
+        vpc_1 = self.create_vpc(vpc_off_1, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_1, state="Enabled")
 
         self.debug("Creating a VPC with Source NAT service provider as "
                    "VpcVirtualRouter...")
         with self.assertRaises(Exception):
-            self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
+            self.create_vpc(vpc_off_2, cidr='10.1.0.0/16')
         self.debug("Nuage VSP does not support provider VpcVirtualRouter for "
                    "service Source NAT for VPCs")
 
         self.debug("Creating a VPC without Source NAT service...")
         with self.assertRaises(Exception):
-            self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
+            self.create_vpc(vpc_off_3, cidr='10.1.0.0/16')
         self.debug("Nuage VSP does not support VPCs without Source NAT "
                    "service")
 
@@ -714,8 +714,8 @@ def test_04_nuage_SourceNAT_vpc_network_traffic(self):
         # Creating VPC
         self.debug("Creating a VPC with Source NAT service provider as "
                    "NuageVsp...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offering
         self.debug("Creating Nuage VSP VPC Network offering with Source NAT "
@@ -887,8 +887,8 @@ def test_05_nuage_SourceNAT_acl_rules_traffic(self):
         # Creating VPC
         self.debug("Creating a VPC with Source NAT service provider as "
                    "NuageVsp...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating VPC network offering
         self.debug("Creating Nuage VSP VPC Network offering with Source NAT "
@@ -1321,8 +1321,8 @@ def test_08_nuage_SourceNAT_network_restarts_traffic(self):
         # Creating VPC
         self.debug("Creating a VPC with Source NAT service provider as "
                    "NuageVsp...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating VPC network offering
         self.debug("Creating Nuage VSP VPC Network offering with Source NAT "
diff --git a/test/integration/plugins/nuagevsp/test_nuage_static_nat.py b/test/integration/plugins/nuagevsp/test_nuage_static_nat.py
index e611b97ae08..b1b0d60a225 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_static_nat.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_static_nat.py
@@ -849,19 +849,19 @@ def test_04_nuage_StaticNAT_vpc_networks(self):
         # Creating VPCs
         self.debug("Creating a VPC with Static NAT service provider as "
                    "NuageVsp...")
-        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_1, state="Enabled")
+        vpc_1 = self.create_vpc(vpc_off_1, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_1, state="Enabled")
 
         self.debug("Creating a VPC with Static NAT service provider as "
                    "VpcVirtualRouter...")
         with self.assertRaises(Exception):
-            self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
+            self.create_vpc(vpc_off_2, cidr='10.1.0.0/16')
         self.debug("Nuage VSP does not support provider VpcVirtualRouter for "
                    "service Static NAT for VPCs")
 
         self.debug("Creating a VPC without Static NAT service...")
-        vpc_2 = self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_2, state="Enabled")
+        vpc_2 = self.create_vpc(vpc_off_3, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_2, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
@@ -1194,8 +1194,8 @@ def test_06_nuage_StaticNAT_vpc_network_traffic(self):
         # Creating VPC
         self.debug("Creating a VPC with Static NAT service provider as "
                    "NuageVsp...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offering
         self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
@@ -1406,8 +1406,8 @@ def test_07_nuage_StaticNAT_acl_rules_traffic(self):
         # Creating VPC
         self.debug("Creating a VPC with Static NAT service provider as "
                    "NuageVsp...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offering
         self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
@@ -1696,7 +1696,8 @@ def test_08_nuage_StaticNAT_vm_nic_operations_traffic(self):
         # from the deployed VM
         if not self.isSimulator:
             with self.assertRaises(Exception):
-                self.verify_StaticNAT_Internet_traffic(vm, network_1, public_ip_1)
+                self.verify_StaticNAT_Internet_traffic(vm, network_1,
+                                                       public_ip_1)
         self.debug("Static NAT rule not enabled in this VM NIC")
         self.verify_StaticNAT_Internet_traffic(vm, network_2, public_ip_2)
 
@@ -1991,8 +1992,8 @@ def test_10_nuage_StaticNAT_network_restarts_traffic(self):
         # Creating VPC
         self.debug("Creating a VPC with Static NAT service provider as "
                    "NuageVsp...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating VPC network offering
         self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
@@ -2088,6 +2089,7 @@ def test_10_nuage_StaticNAT_network_restarts_traffic(self):
         self.debug("Restarting the created VPC network with cleanup...")
         Network.restart(vpc_tier, self.api_client, cleanup=True)
         self.validate_Network(vpc_tier, state="Implemented")
+        vpc_vr = self.get_Router(vpc_tier)
         self.check_Router_state(vpc_vr, state="Running")
         self.check_VM_state(vpc_vm, state="Running")
 
diff --git a/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py b/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
index f80dcb5cb5e..9ccb2f3e3f8 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
@@ -375,25 +375,25 @@ def test_01_nuage_internallb_vpc_Offering(self):
         # Creating VPCs
         self.debug("Creating a VPC with LB service provider as "
                    "InternalLbVm...")
-        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_1, state="Enabled")
+        vpc_1 = self.create_vpc(vpc_off_1, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_1, state="Enabled")
 
         self.debug("Creating a VPC with LB service provider as "
                    "VpcVirtualRouter...")
         with self.assertRaises(Exception):
-            self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
+            self.create_vpc(vpc_off_2, cidr='10.1.0.0/16')
         self.debug("Nuage VSP does not support provider VpcVirtualRouter for "
                    "service LB for VPCs")
 
         self.debug("Creating a VPC with LB service provider as Netscaler...")
         with self.assertRaises(Exception):
-            self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
+            self.create_vpc(vpc_off_3, cidr='10.1.0.0/16')
         self.debug("Nuage VSP does not support provider Netscaler for service "
                    "LB for VPCs")
 
         self.debug("Creating a VPC without LB service...")
-        vpc_2 = self.create_Vpc(vpc_off_4, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_2, state="Enabled")
+        vpc_2 = self.create_vpc(vpc_off_4, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_2, state="Enabled")
 
     @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
     def test_02_nuage_internallb_vpc_network_offering(self):
@@ -438,8 +438,8 @@ def test_02_nuage_internallb_vpc_network_offering(self):
 
         # Creating VPC
         self.debug("Creating a VPC with Internal LB service...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with LB Service "
@@ -594,12 +594,12 @@ def test_03_nuage_internallb_vpc_networks(self):
 
         # Creating VPCs
         self.debug("Creating a VPC with Internal LB service...")
-        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_1, state="Enabled")
+        vpc_1 = self.create_vpc(vpc_off_1, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_1, state="Enabled")
 
         self.debug("Creating a VPC without Internal LB service...")
-        vpc_2 = self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc_2, state="Enabled")
+        vpc_2 = self.create_vpc(vpc_off_2, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc_2, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
@@ -794,8 +794,8 @@ def test_04_nuage_internallb_rules(self):
 
         # Creating a VPC
         self.debug("Creating a VPC with Internal LB service...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
@@ -1148,8 +1148,8 @@ def test_05_nuage_internallb_traffic(self):
 
         # Creating a VPC
         self.debug("Creating a VPC with Internal LB service...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
@@ -1477,8 +1477,8 @@ def test_06_nuage_internallb_algorithms_traffic(self):
 
         # Creating a VPC
         self.debug("Creating a VPC with Internal LB service...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
@@ -1742,8 +1742,8 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
 
         # Creating a VPC
         self.debug("Creating a VPC with Internal LB service...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
@@ -1881,8 +1881,9 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vsd_firewall_rule(public_ssh_rule)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # Restart Internal tier (cleanup = false)
         # InternalLbVm gets destroyed and deployed again in the Internal tier
@@ -1921,13 +1922,15 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
         # Restart Internal tier (cleanup = true)
         # InternalLbVm gets destroyed and deployed again in the Internal tier
         self.debug("Restarting the Internal tier with cleanup...")
         Network.restart(internal_tier, self.api_client, cleanup=True)
         self.validate_Network(internal_tier, state="Implemented")
+        vr = self.get_Router(internal_tier)
         self.check_Router_state(vr, state="Running")
         self.check_VM_state(internal_vm, state="Running")
         self.check_VM_state(internal_vm_1, state="Running")
@@ -1960,8 +1963,9 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # Restart Public tier (cleanup = false)
         # This restart has no effect on the InternalLbVm functionality
@@ -2011,6 +2015,7 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.debug("Restarting the Public tier with cleanup...")
         Network.restart(public_tier, self.api_client, cleanup=True)
         self.validate_Network(public_tier, state="Implemented")
+        vr = self.get_Router(public_tier)
         self.check_Router_state(vr, state="Running")
         self.check_VM_state(public_vm, state="Running")
         self.validate_PublicIPAddress(
@@ -2040,8 +2045,9 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # Stopping VMs in the Internal tier
         # wget traffic test fails as all the VMs in the Internal tier are in
@@ -2072,8 +2078,10 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vsd_lb_device(int_lb_vm)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm, should_fail=True)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm,
+                                             should_fail=True)
 
         # Starting VMs in the Internal tier
         # wget traffic test succeeds as all the VMs in the Internal tier are
@@ -2112,8 +2120,9 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # Restarting VPC (cleanup = false)
         # VPC VR gets destroyed and deployed again in the VPC
@@ -2160,8 +2169,9 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # Restarting VPC (cleanup = true)
         # VPC VR gets destroyed and deployed again in the VPC
@@ -2217,9 +2227,13 @@ def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
         self.verify_lb_wget_file(
             wget_file, [internal_vm, internal_vm_1, internal_vm_2])
 
-    def verify_internal_lb_wget_traffic(self, int_lb_rule_1, internal_vm, internal_vm_1, internal_vm_2, public_ip, public_vm, should_fail=False):
+    def verify_internal_lb_wget_traffic(self, int_lb_rule_1, internal_vm,
+                                        internal_vm_1, internal_vm_2,
+                                        public_ip, public_vm,
+                                        should_fail=False):
         if self.isSimulator:
-            self.debug("Simulator Environment: not running wget traffic tests.")
+            self.debug("Simulator Environment: "
+                       "not running wget traffic tests.")
             return
         ssh_client = self.ssh_into_VM(public_vm, public_ip)
         tries = 0
@@ -2240,8 +2254,8 @@ def verify_internal_lb_wget_traffic(self, int_lb_rule_1, internal_vm, internal_v
             with self.assertRaises(Exception):
                 self.verify_lb_wget_file(
                     wget_file, [internal_vm, internal_vm_1, internal_vm_2])
-            self.debug("Failed to wget file as all the VMs in the Internal tier "
-                       "are in stopped state")
+            self.debug("Failed to wget file as all the VMs in the Internal "
+                       "tier are in stopped state")
         else:
             self.verify_lb_wget_file(
                 wget_file, [internal_vm, internal_vm_1, internal_vm_2])
@@ -2278,8 +2292,8 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
 
         # Creating a VPC
         self.debug("Creating a VPC with Internal LB service...")
-        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_off, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating network offerings
         self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
@@ -2429,8 +2443,9 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
         self.verify_vsd_firewall_rule(public_ssh_rule)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # # Stopping the InternalLbVm when the VPC VR is in Stopped state
         self.stop_InternalLbVm(int_lb_vm)
@@ -2449,8 +2464,9 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm,
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm,
                                              should_fail=True)
 
         # # Starting the InternalLbVm when the VPC VR is in Stopped state
@@ -2471,8 +2487,9 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
 
         # Internal LB (wget) traffic test
         # Bug CLOUDSTACK-9837
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # Starting the VPC VR
         # VPC VR has no effect on the InternalLbVm functionality
@@ -2503,8 +2520,10 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm, should_fail=True)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm,
+                                             should_fail=True)
 
         # # Starting the InternalLbVm when the VPC VR is in Running state
         self.start_InternalLbVm(int_lb_vm)
@@ -2523,8 +2542,9 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
 
         # # Force Stopping the InternalLbVm when the VPC VR is in Running state
         self.stop_InternalLbVm(int_lb_vm, force=True)
@@ -2543,8 +2563,9 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm,
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm,
                                              should_fail=True)
 
         # # Starting the InternalLbVm when the VPC VR is in Running state
@@ -2564,5 +2585,6 @@ def test_08_nuage_internallb_appliance_operations_traffic(self):
         self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
 
         # Internal LB (wget) traffic test
-        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm, internal_vm_1,
-                                             internal_vm_2, public_ip, public_vm)
+        self.verify_internal_lb_wget_traffic(int_lb_rule_1, internal_vm,
+                                             internal_vm_1, internal_vm_2,
+                                             public_ip, public_vm)
diff --git a/test/integration/plugins/nuagevsp/test_nuage_vpc_network.py b/test/integration/plugins/nuagevsp/test_nuage_vpc_network.py
index 167559ad3a3..c75159e1741 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_vpc_network.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_vpc_network.py
@@ -20,7 +20,7 @@
 """
 # Import Local Modules
 from nuageTestCase import nuageTestCase
-from marvin.lib.base import Account, VPC
+from marvin.lib.base import Account
 # Import System Modules
 from nose.plugins.attrib import attr
 
@@ -75,8 +75,8 @@ def test_nuage_vpc_network(self):
 
         # Creating a VPC
         self.debug("Creating a VPC with Nuage VSP VPC offering...")
-        vpc = self.create_Vpc(vpc_offering, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(vpc_offering, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
 
         # Creating a network offering
         self.debug("Creating Nuage VSP VPC Network offering...")
diff --git a/test/integration/plugins/nuagevsp/test_nuage_vsp_domain_template.py b/test/integration/plugins/nuagevsp/test_nuage_vsp_domain_template.py
index 165632db701..dcc2025705d 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_vsp_domain_template.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_vsp_domain_template.py
@@ -158,7 +158,7 @@ def setUpClass(cls):
             for i in range(0, 3):
                 cls.domain_template_list.append("domain_template_" + str(i))
             for account in [cls.account_root, cls.account_d1, cls.account_d11]:
-                vpc = cls.create_Vpc(
+                vpc = cls.create_vpc(
                     cls.vpc_offering, cidr='10.1.0.0/16', account=account)
                 cls.create_Network(
                     cls.network_offering,
@@ -350,7 +350,7 @@ def test_01_nuage_Domain_Template_selection_per_VPC(self):
         # 7. Delete all the created objects (cleanup).
 
         # Creating VPC
-        vpc_1 = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc_1 = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
 
         # Associating pre-configured Nuage VSP Domain Template to VPC
         with self.assertRaises(Exception):
@@ -426,7 +426,7 @@ def test_01_nuage_Domain_Template_selection_per_VPC(self):
             domain_template_name=self.domain_template_list[0])
 
         # Creating VPC
-        vpc_2 = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc_2 = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
 
         # Associating pre-configured Nuage VSP Domain Template to VPC
         self.validate_NuageVspDomainTemplate(self.domain_template_list[0])
@@ -481,7 +481,7 @@ def test_01_nuage_Domain_Template_selection_per_VPC(self):
             domain_template_name=self.domain_template_list[1])
 
         # Creating VPC
-        vpc_3 = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc_3 = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
 
         # Associating pre-configured Nuage VSP Domain Template to VPC
         self.validate_NuageVspDomainTemplate(self.domain_template_list[0])
@@ -523,7 +523,7 @@ def test_01_nuage_Domain_Template_selection_per_VPC(self):
             domain_template_name=self.domain_template_list[0])
 
         # Creating VPC and VPC network (tier)
-        vpc = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
         vpc_tier = self.create_Network(self.network_offering, vpc=vpc)
 
         # VSD verification
@@ -669,7 +669,7 @@ def test_07_nuage_Global_Domain_Template(self):
 
         # Creating VPC
         with self.assertRaises(Exception):
-            self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+            self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
         self.debug("VPC creation fails as there is no domain template with "
                    "name invalid_domain_template in VSD as mentioned in "
                    "global setting nuagevsp.vpc.domaintemplate.name")
@@ -685,7 +685,7 @@ def test_07_nuage_Global_Domain_Template(self):
         self.validate_NuageVspDomainTemplate(self.domain_template_list[0])
 
         # Creating VPC and VPC networks (tiers)
-        vpc_1 = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc_1 = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
         vpc_1_tier_1 = self.create_Network(
             self.network_offering, gateway='10.1.1.1', vpc=vpc_1)
         vpc_1_tier_2 = self.create_Network(
@@ -700,7 +700,7 @@ def test_07_nuage_Global_Domain_Template(self):
             domain_template_name=self.domain_template_list[0])
 
         # Creating VPC and VPC networks (tiers)
-        vpc_2 = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc_2 = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
         vpc_2_tier_1 = self.create_Network(
             self.network_offering, gateway='10.1.1.1', vpc=vpc_2)
         vpc_2_tier_2 = self.create_Network(
@@ -715,7 +715,7 @@ def test_07_nuage_Global_Domain_Template(self):
             domain_template_name=self.domain_template_list[0])
 
         # Creating VPC
-        vpc_3 = self.create_Vpc(self.vpc_offering, cidr='10.1.0.0/16')
+        vpc_3 = self.create_vpc(self.vpc_offering, cidr='10.1.0.0/16')
 
         # Associating pre-configured Nuage VSP Domain Template to VPC
         self.validate_NuageVspDomainTemplate(self.domain_template_list[1])
diff --git a/test/integration/plugins/nuagevsp/test_nuage_vsp_mngd_subnets.py b/test/integration/plugins/nuagevsp/test_nuage_vsp_mngd_subnets.py
index c60f9309310..1947f661b61 100644
--- a/test/integration/plugins/nuagevsp/test_nuage_vsp_mngd_subnets.py
+++ b/test/integration/plugins/nuagevsp/test_nuage_vsp_mngd_subnets.py
@@ -343,8 +343,8 @@ def test_02_nuage_mngd_subnets_vpc(self):
         self.api_client.updateZone(cmd)
         self.debug("Creating a VPC with Static NAT service provider as "
                    "VpcVirtualRouter")
-        vpc = self.create_Vpc(self.nuage_vpc_offering, cidr='10.1.0.0/16')
-        self.validate_Vpc(vpc, state="Enabled")
+        vpc = self.create_vpc(self.nuage_vpc_offering, cidr='10.1.0.0/16')
+        self.validate_vpc(vpc, state="Enabled")
         acl_list = self.create_NetworkAclList(
             name="acl", description="acl", vpc=vpc)
         self.create_NetworkAclRule(
@@ -354,8 +354,8 @@ def test_02_nuage_mngd_subnets_vpc(self):
 
         self.debug("Creating another VPC with Static NAT service provider "
                    "as VpcVirtualRouter")
-        vpc2 = self.create_Vpc(self.nuage_vpc_offering, cidr='10.2.0.0/16')
-        self.validate_Vpc(vpc2, state="Enabled")
+        vpc2 = self.create_vpc(self.nuage_vpc_offering, cidr='10.2.0.0/16')
+        self.validate_vpc(vpc2, state="Enabled")
         acl_list2 = self.create_NetworkAclList(
                 name="acl", description="acl", vpc=vpc2)
         self.create_NetworkAclRule(
@@ -447,8 +447,8 @@ def test_02_nuage_mngd_subnets_vpc(self):
 
             self.debug("Creating another VPC with Static NAT service provider "
                        "as VpcVirtualRouter With same CIDR")
-            vpc3 = self.create_Vpc(self.nuage_vpc_offering, cidr='10.1.0.0/16')
-            self.validate_Vpc(vpc3, state="Enabled")
+            vpc3 = self.create_vpc(self.nuage_vpc_offering, cidr='10.1.0.0/16')
+            self.validate_vpc(vpc3, state="Enabled")
             acl_list3 = self.create_NetworkAclList(
                     name="acl", description="acl", vpc=vpc3)
             self.create_NetworkAclRule(
diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py
index b3b3069967f..e8166fbd10d 100755
--- a/tools/marvin/marvin/lib/base.py
+++ b/tools/marvin/marvin/lib/base.py
@@ -350,10 +350,13 @@ def __init__(self, items, services):
             self.username = services["username"]
         else:
             self.username = 'root'
-        if "password" in services:
-            self.password = services["password"]
-        else:
-            self.password = 'password'
+
+        if "password" not in items:
+            if "password" in services:
+                self.password = services["password"]
+            else:
+                self.password = 'password'
+
         if "ssh_port" in services:
             self.ssh_port = services["ssh_port"]
         else:
diff --git a/tools/marvin/marvin/lib/common.py b/tools/marvin/marvin/lib/common.py
index 86203d6d6fe..03e5fba526b 100644
--- a/tools/marvin/marvin/lib/common.py
+++ b/tools/marvin/marvin/lib/common.py
@@ -345,14 +345,16 @@ def get_template(
     return list_templatesout[0]
 
 
-def get_test_template(apiclient, zone_id=None, hypervisor=None):
+def get_test_template(apiclient, zone_id=None, hypervisor=None, test_templates=None):
     """
     @Name : get_test_template
     @Desc : Retrieves the test template used to running tests. When the template
             is missing it will be download at most one in a zone for a hypervisor.
     @Input : returns a template
     """
-    test_templates = test_data["test_templates"]
+
+    if test_templates is None:
+        test_templates = test_data["test_templates"]
 
     if hypervisor is None:
         return FAILED
diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py
index 310d27ae923..1c62d56c1cc 100644
--- a/tools/marvin/setup.py
+++ b/tools/marvin/setup.py
@@ -54,10 +54,12 @@
           "pyvmomi >= 5.5.0",
           "netaddr >= 0.7.14",
           "dnspython",
-          "ipmisim >= 0.7"
+          "ipmisim >= 0.7",
+          "retries",
+          "PyCrypt"
       ],
       extras_require={
-        "nuagevsp": ["vspk", "PyYAML", "futures", "netaddr", "retries", "jpype1"]
+        "nuagevsp": ["vspk", "PyYAML", "futures", "netaddr", "jpype1"]
       },
       py_modules=['marvin.marvinPlugin'],
       zip_safe=False,


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


> changing passwordenabled to true while guest vm is running causes unexpected passwordreset again in startvm
> -----------------------------------------------------------------------------------------------------------
>
>                 Key: CLOUDSTACK-10380
>                 URL: https://issues.apache.org/jira/browse/CLOUDSTACK-10380
>             Project: CloudStack
>          Issue Type: Bug
>      Security Level: Public(Anyone can view this level - this is the default.) 
>    Affects Versions: 4.12, 4.11.1.0
>            Reporter: Raf Smeets
>            Assignee: Frank Maximus
>            Priority: Major
>
> Changing passwordenabled to true while the guest VM is running causes an unexpected password reset again in startVm.
> Steps to reproduce:
>  # Template passwordenabled flag is set to false
>  # Start Vm
>  # Set template passwordenabled flag to true
>  # StopVm
>  # ResetPassword for Stopped Vm. Password is PasswordA.
> # StartVm. Password is PasswordB. This should not happen!
>  # SSH into VM only works with PasswordB.
> The next steps behave as expected.
>  # StopVm
>  # ResetPassword for Stopped Vm. Password is PasswordC.
> # StartVm. No change in password.
>  # SSH into VM works with PasswordC.
> This was found when test/integration/plugins/nuagevsp/test_nuage_password_reset.py started to fail after PR2651 [https://github.com/apache/cloudstack/pull/2651] was merged.
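
The reproduction steps quoted above map onto a short Marvin-style call sequence. The sketch below is illustrative only and is not part of the PR: it assumes an already initialized api_client, an existing marvin.lib.base Template instance named template, and a deployed VirtualMachine instance named vm, and it assumes Template.update passes passwordenabled through to updateTemplate.

# Hedged sketch, not from the PR. Assumes pre-existing objects:
#   api_client - an initialized CloudStack API client
#   template   - marvin.lib.base.Template (passwordenabled currently false)
#   vm         - marvin.lib.base.VirtualMachine, already running
from marvin.lib.base import Template, VirtualMachine  # documents the assumed types

# Step 3: flip the template to password-enabled while the guest VM is running
# (assumes Template.update forwards passwordenabled to updateTemplate).
template.update(api_client, passwordenabled=True)

# Steps 4-5: stop the VM and reset its password ("PasswordA").
vm.stop(api_client)
password_a = vm.resetPassword(api_client)

# Step 6: start the VM; with the bug present the guest receives a second,
# different password ("PasswordB"), so SSH with password_a fails.
vm.start(api_client)

# Workaround applied in test_nuage_password_reset.py until the bug is fixed:
# stop, reset, and start once more; this second reset is the one that sticks.
vm.stop(api_client)
password_c = vm.resetPassword(api_client)
vm.start(api_client)

Once CLOUDSTACK-10380 itself is fixed, the extra stop/reset/start round added to the test can be dropped again.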



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
