cloudstack-commits mailing list archives

From bhais...@apache.org
Subject [cloudstack] branch master updated: CLOUDSTACK-8865: Adding SR doesn't create Storage_pool_host_ref entry for disabled host (#876)
Date Thu, 21 Sep 2017 05:19:14 GMT
This is an automated email from the ASF dual-hosted git repository.

bhaisaab pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/master by this push:
     new c670691  CLOUDSTACK-8865: Adding SR doesn't create Storage_pool_host_ref entry for disabled host (#876)
c670691 is described below

commit c670691bfb82b31429d6c240290e521ec99b2b4b
Author: SudharmaJain <sudharma.jain@accelerite.com>
AuthorDate: Thu Sep 21 01:19:11 2017 -0400

    CLOUDSTACK-8865: Adding SR doesn't create Storage_pool_host_ref entry for disabled host (#876)
    
    This causes VM deployment failures on a host that was disabled while the storage repository was being added.
    In the attachCluster function of the PrimaryDataStoreLifeCycle, only hosts that are up and in the Enabled state were selected. Selecting all up hosts instead populates the database correctly and fixes the issue. A unit test for the attachCluster function was also added.
---
 .../src/com/cloud/resource/ResourceManager.java    |   2 +
 .../ElastistorPrimaryDataStoreLifeCycle.java       |   2 +-
 .../CloudStackPrimaryDataStoreLifeCycleImpl.java   |   2 +-
 ...loudStackPrimaryDataStoreLifeCycleImplTest.java | 170 +++++++++++++++++++++
 .../SolidFireSharedPrimaryDataStoreLifeCycle.java  |   2 +-
 .../com/cloud/resource/ResourceManagerImpl.java    |  17 +++
 .../cloud/resource/MockResourceManagerImpl.java    |   5 +
 test/integration/smoke/test_primary_storage.py     | 113 +++++++++++++-
 8 files changed, 307 insertions(+), 6 deletions(-)
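
For readers skimming the patch, here is a minimal, self-contained sketch of the behavioural difference between the old up-and-Enabled host selection and the new up-only selection used in attachCluster. This is plain Java, not CloudStack's actual ResourceManager API; SimpleHost and both filter methods are hypothetical stand-ins for HostVO and the listAllUpAndEnabledHosts / listAllUpHosts calls touched by this commit.

import java.util.List;

public class HostSelectionSketch {

    enum Status { Up, Down }
    enum ResourceState { Enabled, Disabled }

    // Hypothetical stand-in for CloudStack's HostVO; only the fields relevant here.
    record SimpleHost(String name, Status status, ResourceState resourceState) { }

    // Old behaviour (listAllUpAndEnabledHosts): a host that is Up but Disabled is skipped,
    // so no storage_pool_host_ref row is created for it when the SR is added.
    static List<SimpleHost> upAndEnabledHosts(List<SimpleHost> hosts) {
        return hosts.stream()
                .filter(h -> h.status() == Status.Up && h.resourceState() == ResourceState.Enabled)
                .toList();
    }

    // New behaviour (listAllUpHosts): only Status.Up is required, so a disabled host
    // still gets the pool/host mapping and can use the pool once it is re-enabled.
    static List<SimpleHost> upHosts(List<SimpleHost> hosts) {
        return hosts.stream()
                .filter(h -> h.status() == Status.Up)
                .toList();
    }

    public static void main(String[] args) {
        List<SimpleHost> hosts = List.of(
                new SimpleHost("host-1", Status.Up, ResourceState.Enabled),
                new SimpleHost("host-2", Status.Up, ResourceState.Disabled));
        System.out.println("up and enabled: " + upAndEnabledHosts(hosts)); // host-1 only
        System.out.println("up only:        " + upHosts(hosts));           // host-1 and host-2
    }
}

With the up-only filter, a disabled-but-up host still receives its storage_pool_host_ref entry when the SR is added, so VM deployment on that host works once it is re-enabled, which is what the integration test below exercises.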

diff --git a/engine/components-api/src/com/cloud/resource/ResourceManager.java b/engine/components-api/src/com/cloud/resource/ResourceManager.java
index 7783fa1..d96434b 100755
--- a/engine/components-api/src/com/cloud/resource/ResourceManager.java
+++ b/engine/components-api/src/com/cloud/resource/ResourceManager.java
@@ -99,6 +99,8 @@ public interface ResourceManager extends ResourceService {
 
     public List<HostVO> listAllHosts(final Host.Type type, final Long clusterId, final Long podId, final long dcId);
 
+    public List<HostVO> listAllUpHosts(Host.Type type, Long clusterId, Long podId, long dcId);
+
     public List<HostVO> listAllHostsInCluster(long clusterId);
 
     public List<HostVO> listHostsInClusterByStatus(long clusterId, Status status);
diff --git a/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java
index bbf35e6..d6a67b4 100644
--- a/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java
+++ b/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java
@@ -359,7 +359,7 @@ public class ElastistorPrimaryDataStoreLifeCycle implements PrimaryDataStoreLife
 
         PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
         // Check if there is host up in this cluster
-        List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
+        List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
         if (allHosts.isEmpty()) {
             primaryDataStoreDao.expunge(primarystore.getId());
             throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
index ab9cecf..a500fdb 100644
--- a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
+++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
@@ -387,7 +387,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore
         PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)store;
         // Check if there is host up in this cluster
         List<HostVO> allHosts =
-                _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
+                _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
         if (allHosts.isEmpty()) {
             primaryDataStoreDao.expunge(primarystore.getId());
             throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
diff --git a/plugins/storage/volume/default/test/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/test/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java
new file mode 100644
index 0000000..ffdc514
--- /dev/null
+++ b/plugins/storage/volume/default/test/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java
@@ -0,0 +1,170 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.resource.ResourceManager;
+import com.cloud.resource.ResourceState;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StorageManagerImpl;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import junit.framework.TestCase;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.provider.DefaultHostListener;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Created by ajna123 on 9/22/2015.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase {
+
+    @InjectMocks
+    PrimaryDataStoreLifeCycle _cloudStackPrimaryDataStoreLifeCycle = new CloudStackPrimaryDataStoreLifeCycleImpl();
+
+    @Spy
+    @InjectMocks
+    StorageManager storageMgr = new StorageManagerImpl();
+
+    @Mock
+    ResourceManager _resourceMgr;
+
+    @Mock
+    AgentManager agentMgr;
+
+    @Mock
+    DataStoreManager _dataStoreMgr;
+
+    @Mock
+    DataStoreProviderManager _dataStoreProviderMgr;
+
+    @Spy
+    @InjectMocks
+    HypervisorHostListener hostListener = new DefaultHostListener();
+
+    @Mock
+    StoragePoolHostDao storagePoolHostDao;
+
+    @Mock
+    PrimaryDataStore store;
+
+    @Mock
+    DataStoreProvider dataStoreProvider;
+
+    @Mock
+    ModifyStoragePoolAnswer answer;
+
+    @Mock
+    StoragePoolInfo info;
+
+    @Mock
+    PrimaryDataStoreDao primaryStoreDao;
+
+    @Mock
+    StoragePoolVO storagePool;
+
+    @Mock
+    PrimaryDataStoreHelper primaryDataStoreHelper;
+
+    @Before
+    public void initMocks() {
+
+        MockitoAnnotations.initMocks(this);
+
+        List<HostVO> hostList = new ArrayList<HostVO>();
+        HostVO host1 = new HostVO(1L, "aa01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null,
+                UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.NetworkFilesystem);
+        HostVO host2 = new HostVO(1L, "aa02", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null,
+                UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.NetworkFilesystem);
+
+        host1.setResourceState(ResourceState.Enabled);
+        host2.setResourceState(ResourceState.Disabled);
+        hostList.add(host1);
+        hostList.add(host2);
+
+        when(_dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store);
+        when(store.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+        when(store.isShared()).thenReturn(true);
+        when(store.getName()).thenReturn("newPool");
+
+        when(_dataStoreProviderMgr.getDataStoreProvider(anyString())).thenReturn(dataStoreProvider);
+        when(dataStoreProvider.getName()).thenReturn("default");
+        ((StorageManagerImpl)storageMgr).registerHostListener("default", hostListener);
+
+        when(_resourceMgr.listAllUpHosts(eq(Host.Type.Routing), anyLong(), anyLong(), anyLong())).thenReturn(hostList);
+        when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer);
+        when(answer.getResult()).thenReturn(true);
+        when(answer.getPoolInfo()).thenReturn(info);
+
+        when(info.getLocalPath()).thenReturn("/mnt/1");
+        when(info.getCapacityBytes()).thenReturn(0L);
+        when(info.getAvailableBytes()).thenReturn(0L);
+
+        when(storagePoolHostDao.findByPoolHost(anyLong(), anyLong())).thenReturn(null);
+        when(primaryStoreDao.findById(anyLong())).thenReturn(storagePool);
+        when(primaryStoreDao.update(anyLong(), Mockito.any(StoragePoolVO.class))).thenReturn(true);
+        when(primaryDataStoreHelper.attachCluster(Mockito.any(DataStore.class))).thenReturn(null);
+    }
+
+    @Test
+    public void testAttachCluster() throws Exception {
+        _cloudStackPrimaryDataStoreLifeCycle.attachCluster(store, new ClusterScope(1L, 1L, 1L));
+        verify(storagePoolHostDao,times(2)).persist(Mockito.any(StoragePoolHostVO.class));
+
+    }
+}
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java
index a1ea9d0..e17219c 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java
@@ -371,7 +371,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
         PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo)store;
 
         // check if there is at least one host up in this cluster
-        List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(),
+        List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(),
                 primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId());
 
         if (allHosts.isEmpty()) {
diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java
index 87e0bc5..9feb9b7 100755
--- a/server/src/com/cloud/resource/ResourceManagerImpl.java
+++ b/server/src/com/cloud/resource/ResourceManagerImpl.java
@@ -2518,6 +2518,23 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
     }
 
     @Override
+    public List<HostVO> listAllUpHosts(Type type, Long clusterId, Long podId, long dcId) {
+        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
+        if (type != null) {
+            sc.and(sc.entity().getType(), Op.EQ, type);
+        }
+        if (clusterId != null) {
+            sc.and(sc.entity().getClusterId(), Op.EQ, clusterId);
+        }
+        if (podId != null) {
+            sc.and(sc.entity().getPodId(), Op.EQ, podId);
+        }
+        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
+        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
+        return sc.list();
+    }
+
+    @Override
     public List<HostVO> listAllUpAndEnabledNonHAHosts(final Type type, final Long clusterId, final Long podId, final long dcId) {
         final String haTag = _haMgr.getHaTag();
         return _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, haTag);
diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java
index e293f2d..3eb5f51 100755
--- a/server/test/com/cloud/resource/MockResourceManagerImpl.java
+++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java
@@ -364,6 +364,11 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
         return null;
     }
 
+    @Override
+    public List<HostVO> listAllUpHosts(Type type, Long clusterId, Long podId, long dcId) {
+        return null;
+    }
+
     /* (non-Javadoc)
      * @see com.cloud.resource.ResourceManager#listAllHostsInCluster(long)
      */
diff --git a/test/integration/smoke/test_primary_storage.py b/test/integration/smoke/test_primary_storage.py
index ab67de0..882df3c 100644
--- a/test/integration/smoke/test_primary_storage.py
+++ b/test/integration/smoke/test_primary_storage.py
@@ -42,6 +42,12 @@ class TestPrimaryStorageServices(cloudstackTestCase):
         self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
         self.pod = get_pod(self.apiclient, self.zone.id)
         self.hypervisor = self.testClient.getHypervisorInfo()
+        self.domain = get_domain(self.apiclient)
+        self.template = get_template(
+            self.apiclient ,
+            self.zone.id ,
+            self.services["ostype"]
+        )
 
         return
 
@@ -249,6 +255,107 @@ class TestPrimaryStorageServices(cloudstackTestCase):
 
         return
 
+    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
+    def test_01_add_primary_storage_disabled_host(self):
+        """Test add primary storage pool with disabled host
+        """
+
+        #Disable a host
+        clusters = list_clusters(
+            self.apiclient,
+            zoneid=self.zone.id
+        )
+        assert isinstance(clusters,list) and len(clusters)>0
+        for cluster in clusters:
+
+            list_hosts_response = list_hosts(
+                self.apiclient,
+                clusterid=cluster.id,
+                type="Routing"
+            )
+            assert isinstance(list_hosts_response,list)
+            if len(list_hosts_response) < 2:
+                continue
+            selected_cluster = cluster
+            selected_host = list_hosts_response[0]
+            Host.update(self.apiclient, id=selected_host.id, allocationstate="Disable")
+
+
+            #create a pool
+            storage_pool_2 = StoragePool.create(
+                self.apiclient,
+                self.services["nfs2"],
+                clusterid=selected_cluster.id,
+                zoneid=self.zone.id,
+                podid=self.pod.id
+            )
+            #self.cleanup.append(storage_pool_2)
+
+            #Enable host and disable others
+            Host.update(self.apiclient, id=selected_host.id, allocationstate="Enable")
+            for host in list_hosts_response :
+                if(host.id == selected_host.id) :
+                    continue
+                Host.update(self.apiclient, id=host.id, allocationstate="Disable")
+
+
+            #put other pools in maintenance
+            storage_pool_list = StoragePool.list(self.apiclient, zoneid = self.zone.id)
+            for pool in storage_pool_list :
+                if(pool.id == storage_pool_2.id) :
+                    continue
+                StoragePool.update(self.apiclient,id=pool.id, enabled=False)
+
+            #deployvm
+            try:
+                # Create Account
+                account = Account.create(
+                    self.apiclient,
+                    self.services["account"],
+                    domainid=self.domain.id
+                )
+
+                service_offering = ServiceOffering.create(
+                    self.apiclient,
+                    self.services["service_offerings"]["tiny"]
+                )
+                self.cleanup.append(service_offering)
+
+                self.virtual_machine = VirtualMachine.create(
+                    self.apiclient,
+                    self.services["virtual_machine"],
+                    accountid=account.name,
+                    zoneid=self.zone.id,
+                    domainid=self.domain.id,
+                    templateid=self.template.id,
+                    serviceofferingid=service_offering.id
+                )
+                self.cleanup.append(self.virtual_machine)
+                self.cleanup.append(account)
+            finally:
+                #cancel maintenance
+                for pool in storage_pool_list :
+                    if(pool.id == storage_pool_2.id) :
+                        continue
+                    StoragePool.update(self.apiclient,id=pool.id, enabled=True)
+                #Enable all hosts
+                for host in list_hosts_response :
+                    if(host.id == selected_host.id) :
+                        continue
+                    Host.update(self.apiclient, id=host.id, allocationstate="Enable")
+
+                cleanup_resources(self.apiclient, self.cleanup)
+                self.cleanup = []
+                StoragePool.enableMaintenance(self.apiclient,storage_pool_2.id)
+                time.sleep(30);
+                cmd = deleteStoragePool.deleteStoragePoolCmd()
+                cmd.id = storage_pool_2.id
+                cmd.forced = True
+                self.apiclient.deleteStoragePool(cmd)
+
+        return
+
+
 class StorageTagsServices:
     """Test Storage Tags Data Class.
     """
@@ -380,7 +487,7 @@ class TestStorageTags(cloudstackTestCase):
             cmd.id = cls.storage_pool_1.id
             cmd.forced = True
             cls.apiclient.deleteStoragePool(cmd)
-            
+
             cleanup_resources(cls.apiclient, cls._cleanup)
         except Exception as e:
             raise Exception("Cleanup failed with %s" % e)
@@ -549,10 +656,10 @@ class TestStorageTags(cloudstackTestCase):
             listall=True
         )
         vol = vm_1_volumes[0]
-        
+
         if self.hypervisor.lower() not in ["vmware", "xenserver"]:
             self.virtual_machine_1.stop(self.apiclient)
-            
+
         # Check migration options for volume
         pools_response = StoragePool.listForMigration(
             self.apiclient,

-- 
To stop receiving notification emails like this one, please contact
['"commits@cloudstack.apache.org" <commits@cloudstack.apache.org>'].
