cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From edi...@apache.org
Subject [1/3] move default primary storage plugin into its own pom
Date Thu, 21 Mar 2013 00:30:01 GMT
Updated Branches:
  refs/heads/master 9270b4335 -> 3ed6200ef


http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
new file mode 100644
index 0000000..2991574
--- /dev/null
+++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
+import com.cloud.capacity.Capacity;
+import com.cloud.capacity.CapacityVO;
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.exception.DiscoveryException;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.server.ManagementServer;
+import com.cloud.storage.OCFS2Manager;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.storage.StoragePoolDiscoverer;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.StoragePoolStatus;
+import com.cloud.storage.StoragePoolWorkVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.dao.StoragePoolWorkDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.user.Account;
+import com.cloud.user.User;
+import com.cloud.user.UserContext;
+import com.cloud.user.dao.UserDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.UriUtils;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.exception.ExecutionException;
+import com.cloud.vm.ConsoleProxyVO;
+import com.cloud.vm.DomainRouterVO;
+import com.cloud.vm.SecondaryStorageVmVO;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachine.State;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.ConsoleProxyDao;
+import com.cloud.vm.dao.DomainRouterDao;
+import com.cloud.vm.dao.SecondaryStorageVmDao;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
+/**
+ * Default ("ancient") primary data store lifecycle implementation for CloudStack.
+ * Parses the storage URL supplied by the API layer into
+ * {@link PrimaryDataStoreParameters}, persists the pool record via
+ * {@link PrimaryDataStoreHelper}, and drives pool create/attach/maintain/delete
+ * operations on hypervisor hosts through the {@link AgentManager}.
+ */
+public class CloudStackPrimaryDataStoreLifeCycleImpl implements
+        PrimaryDataStoreLifeCycle {
+    private static final Logger s_logger = Logger
+            .getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class);
+    @Inject
+    protected ResourceManager _resourceMgr;
+    // NOTE(review): not @Inject-annotated — presumably wired by external
+    // configuration; confirm it is populated before iscsi discovery is used.
+    protected List<StoragePoolDiscoverer> _discoverers;
+    @Inject
+    PrimaryDataStoreDao primaryDataStoreDao;
+    @Inject
+    protected OCFS2Manager _ocfs2Mgr;
+    @Inject
+    DataStoreManager dataStoreMgr;
+    @Inject
+    AgentManager agentMgr;
+    @Inject
+    StorageManager storageMgr;
+
+
+    @Inject
+    VolumeDao volumeDao;
+    @Inject
+    VMInstanceDao vmDao;
+    @Inject
+    ManagementServer server;
+    @Inject
+    protected VirtualMachineManager vmMgr;
+    @Inject
+    protected SecondaryStorageVmDao _secStrgDao;
+    @Inject
+    UserVmDao userVmDao;
+    @Inject
+    protected UserDao _userDao;
+    @Inject
+    protected DomainRouterDao _domrDao;
+    @Inject
+    protected StoragePoolHostDao _storagePoolHostDao;
+    @Inject
+    protected AlertManager _alertMgr;
+    @Inject
+    protected ConsoleProxyDao _consoleProxyDao;
+
+    @Inject
+    protected StoragePoolWorkDao _storagePoolWorkDao;
+    @Inject
+    PrimaryDataStoreHelper dataStoreHelper;
+    @Inject
+    // NOTE(review): field name has a typo ("Autmation"); private to this class,
+    // so left as-is to keep the code byte-identical.
+    StoragePoolAutomation storagePoolAutmation;
+
+    /**
+     * Validates the storage URL, derives pool type/host/port/path per scheme
+     * (nfs, file, sharedMountPoint, clvm, rbd, PreSetup, iscsi, iso, vmfs,
+     * ocfs2, or any StoragePoolType enum name), checks for duplicate
+     * host/path and uuid, then persists the new primary data store.
+     *
+     * Keys read from dsInfos (visible below): clusterId, podId, zoneId, url,
+     * providerName, tags, details, localStorage, uuid, name.
+     *
+     * @param dsInfos raw key/value parameters from the API layer
+     * @return the persisted primary data store
+     * @throws InvalidParameterValueException for a malformed/unsupported url,
+     *         or a cluster id supplied without a pod id
+     * @throws CloudRuntimeException if the host/path is already used by
+     *         another pod or a pool with the same uuid already exists
+     */
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        String url = (String) dsInfos.get("url");
+        String providerName = (String)dsInfos.get("providerName");
+        if (clusterId != null && podId == null) {
+            throw new InvalidParameterValueException(
+                    "Cluster id requires pod id");
+        }
+        
+        PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
+
+        URI uri = null;
+        try {
+            uri = new URI(UriUtils.encodeURIComponent(url));
+            if (uri.getScheme() == null) {
+                throw new InvalidParameterValueException("scheme is null "
+                        + url + ", add nfs:// as a prefix");
+            } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
+                String uriHost = uri.getHost();
+                String uriPath = uri.getPath();
+                if (uriHost == null || uriPath == null
+                        || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
+                    throw new InvalidParameterValueException(
+                            "host or path is null, should be nfs://hostname/path");
+                }
+            } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
+                String uriPath = uri.getPath();
+                if (uriPath == null) {
+                    throw new InvalidParameterValueException(
+                            "host or path is null, should be sharedmountpoint://localhost/path");
+                }
+            } else if (uri.getScheme().equalsIgnoreCase("rbd")) {
+                String uriPath = uri.getPath();
+                if (uriPath == null) {
+                    throw new InvalidParameterValueException(
+                            "host or path is null, should be rbd://hostname/pool");
+                }
+            }
+        } catch (URISyntaxException e) {
+            throw new InvalidParameterValueException(url
+                    + " is not a valid uri");
+        }
+
+        String tags = (String) dsInfos.get("tags");
+        Map<String, String> details = (Map<String, String>) dsInfos
+                .get("details");
+        
+        parameters.setTags(tags);
+        parameters.setDetails(details);
+
+        String scheme = uri.getScheme();
+        String storageHost = uri.getHost();
+        String hostPath = uri.getPath();
+        Object localStorage = dsInfos.get("localStorage");
+        if (localStorage != null) {
+            // Local storage pools store the path with all '/' removed.
+            hostPath = hostPath.replace("/", "");
+        }
+        String userInfo = uri.getUserInfo();
+        int port = uri.getPort();
+        StoragePoolVO pool = null;
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("createPool Params @ scheme - " + scheme
+                    + " storageHost - " + storageHost + " hostPath - "
+                    + hostPath + " port - " + port);
+        }
+        // Per-scheme defaults: when the URI carries no port (-1), substitute
+        // the protocol's conventional port (nfs 2049, rbd 6789, iscsi 3260...).
+        if (scheme.equalsIgnoreCase("nfs")) {
+            if (port == -1) {
+                port = 2049;
+            }
+            parameters.setType(StoragePoolType.NetworkFilesystem);
+            parameters.setHost(storageHost);
+            parameters.setPort(port);
+            parameters.setPath(hostPath);
+        } else if (scheme.equalsIgnoreCase("file")) {
+            if (port == -1) {
+                port = 0;
+            }
+            parameters.setType(StoragePoolType.Filesystem);
+            parameters.setHost("localhost");
+            parameters.setPort(0);
+            parameters.setPath(hostPath);
+        } else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
+            parameters.setType(StoragePoolType.SharedMountPoint);
+            parameters.setHost(storageHost);
+            parameters.setPort(0);
+            parameters.setPath(hostPath);
+        } else if (scheme.equalsIgnoreCase("clvm")) {
+            parameters.setType(StoragePoolType.CLVM);
+            parameters.setHost(storageHost);
+            parameters.setPort(0);
+            parameters.setPath(hostPath.replaceFirst("/", ""));
+        } else if (scheme.equalsIgnoreCase("rbd")) {
+            if (port == -1) {
+                port = 6789;
+            }
+            parameters.setType(StoragePoolType.RBD);
+            parameters.setHost(storageHost);
+            parameters.setPort(port);
+            parameters.setPath(hostPath.replaceFirst("/", ""));
+            // rbd URIs may carry credentials in the userinfo component.
+            parameters.setUserInfo(userInfo);
+        } else if (scheme.equalsIgnoreCase("PreSetup")) {
+            parameters.setType(StoragePoolType.PreSetup);
+            parameters.setHost(storageHost);
+            parameters.setPort(0);
+            parameters.setPath(hostPath);
+        } else if (scheme.equalsIgnoreCase("iscsi")) {
+            String[] tokens = hostPath.split("/");
+            // Last path component is the LUN number; -1 means "no LUN given",
+            // in which case we fall back to discoverer-based pool lookup.
+            int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
+            if (port == -1) {
+                port = 3260;
+            }
+            if (lun != -1) {
+                if (clusterId == null) {
+                    throw new IllegalArgumentException(
+                            "IscsiLUN need to have clusters specified");
+                }
+                // NOTE(review): String.replaceFirst returns a new string and
+                // the result is discarded here, so the leading '/' is NOT
+                // actually stripped from hostPath — confirm intent.
+                hostPath.replaceFirst("/", "");
+                parameters.setType(StoragePoolType.IscsiLUN);
+                parameters.setHost(storageHost);
+                parameters.setPort(port);
+                parameters.setPath(hostPath);
+            } else {
+                for (StoragePoolDiscoverer discoverer : _discoverers) {
+                    Map<StoragePoolVO, Map<String, String>> pools;
+                    try {
+                        pools = discoverer.find(zoneId, podId, uri, details);
+                    } catch (DiscoveryException e) {
+                        throw new IllegalArgumentException(
+                                "Not enough information for discovery " + uri,
+                                e);
+                    }
+                    if (pools != null) {
+                        // Take the first discovered pool and stop searching.
+                        Map.Entry<StoragePoolVO, Map<String, String>> entry = pools
+                                .entrySet().iterator().next();
+                        pool = entry.getKey();
+                        details = entry.getValue();
+                        break;
+                    }
+                }
+            }
+        } else if (scheme.equalsIgnoreCase("iso")) {
+            if (port == -1) {
+                port = 2049;
+            }
+            parameters.setType(StoragePoolType.ISO);
+            parameters.setHost(storageHost);
+            parameters.setPort(port);
+            parameters.setPath(hostPath);
+        } else if (scheme.equalsIgnoreCase("vmfs")) {
+            parameters.setType(StoragePoolType.VMFS);
+            parameters.setHost("VMFS datastore: " + hostPath);
+            parameters.setPort(0);
+            parameters.setPath(hostPath);
+        } else if (scheme.equalsIgnoreCase("ocfs2")) {
+            port = 7777;
+            parameters.setType(StoragePoolType.OCFS2);
+            parameters.setHost("clustered");
+            parameters.setPort(port);
+            parameters.setPath(hostPath);
+        } else {
+            // Fallback: treat the scheme as a StoragePoolType enum name.
+            // NOTE(review): Enum.valueOf throws IllegalArgumentException for an
+            // unknown constant instead of returning null, so the null-check
+            // else-branch below appears unreachable — the warn/throw there
+            // would never run for a bad scheme. Confirm and simplify upstream.
+            StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme);
+                
+            if (type != null) {
+                parameters.setType(type);
+                parameters.setHost(storageHost);
+                parameters.setPort(0);
+                parameters.setPath(hostPath);
+            } else {
+            s_logger.warn("Unable to figure out the scheme for URI: " + uri);
+            throw new IllegalArgumentException(
+                    "Unable to figure out the scheme for URI: " + uri);
+            }
+        }
+
+        // Shared (non-local) pools must not reuse a host/path already claimed
+        // by another pod, except sharedmountpoint which may appear repeatedly.
+        if (localStorage == null) {
+            List<StoragePoolVO> pools = primaryDataStoreDao
+                    .listPoolByHostPath(storageHost, hostPath);
+            if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
+                Long oldPodId = pools.get(0).getPodId();
+                throw new CloudRuntimeException("Storage pool " + uri
+                        + " already in use by another pod (id=" + oldPodId + ")");
+            }
+        }
+      
+        // uuid precedence: caller-supplied > random (sharedmountpoint/clvm)
+        // > path-derived (PreSetup) > deterministic from host+path.
+        Object existingUuid = dsInfos.get("uuid");
+        String uuid = null;
+
+        if (existingUuid != null) {
+            uuid = (String)existingUuid;
+        } else if (scheme.equalsIgnoreCase("sharedmountpoint")
+                || scheme.equalsIgnoreCase("clvm")) {
+            uuid = UUID.randomUUID().toString();
+        } else if (scheme.equalsIgnoreCase("PreSetup")) {
+            uuid = hostPath.replace("/", "");
+        } else {
+            uuid = UUID.nameUUIDFromBytes(
+                    new String(storageHost + hostPath).getBytes()).toString();
+        }
+
+        List<StoragePoolVO> spHandles = primaryDataStoreDao
+                .findIfDuplicatePoolsExistByUUID(uuid);
+        if ((spHandles != null) && (spHandles.size() > 0)) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Another active pool with the same uuid already exists");
+            }
+            throw new CloudRuntimeException(
+                    "Another active pool with the same uuid already exists");
+        }
+
+        String poolName = (String) dsInfos.get("name");
+
+        parameters.setUuid(uuid);
+        parameters.setZoneId(zoneId);
+        parameters.setPodId(podId);
+        parameters.setName(poolName);
+        parameters.setClusterId(clusterId);
+        parameters.setProviderName(providerName);
+        
+        return dataStoreHelper.createPrimaryDataStore(parameters);
+    }
+
+    /**
+     * Sends a CreateStoragePoolCommand to one host for the given pool.
+     *
+     * @param hostId host to create the pool on
+     * @param pool   pool to create; must be one of the supported pool types
+     *               listed below, otherwise false is returned without action
+     * @return true if the host reported success; false for an unsupported type
+     * @throws CloudRuntimeException if the host command fails or returns null
+     *         — note the pool record is expunged before throwing
+     */
+    protected boolean createStoragePool(long hostId, StoragePool pool) {
+        s_logger.debug("creating pool " + pool.getName() + " on  host "
+                + hostId);
+        if (pool.getPoolType() != StoragePoolType.NetworkFilesystem
+                && pool.getPoolType() != StoragePoolType.Filesystem
+                && pool.getPoolType() != StoragePoolType.IscsiLUN
+                && pool.getPoolType() != StoragePoolType.Iscsi
+                && pool.getPoolType() != StoragePoolType.VMFS
+                && pool.getPoolType() != StoragePoolType.SharedMountPoint
+                && pool.getPoolType() != StoragePoolType.PreSetup
+                && pool.getPoolType() != StoragePoolType.OCFS2
+                && pool.getPoolType() != StoragePoolType.RBD
+                && pool.getPoolType() != StoragePoolType.CLVM) {
+            s_logger.warn(" Doesn't support storage pool type "
+                    + pool.getPoolType());
+            return false;
+        }
+        CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
+        final Answer answer = agentMgr.easySend(hostId, cmd);
+        if (answer != null && answer.getResult()) {
+            return true;
+        } else {
+            // Roll back the DB record before surfacing the failure.
+            primaryDataStoreDao.expunge(pool.getId());
+            String msg = "";
+            if (answer != null) {
+                msg = "Can not create storage pool through host " + hostId
+                        + " due to " + answer.getDetails();
+                s_logger.warn(msg);
+            } else {
+                msg = "Can not create storage pool through host " + hostId
+                        + " due to CreateStoragePoolCommand returns null";
+                s_logger.warn(msg);
+            }
+            throw new CloudRuntimeException(msg);
+        }
+    }
+
+    /**
+     * Attaches the pool to a cluster: creates the pool on the first willing
+     * up-and-enabled routing host, then connects every host in the cluster to
+     * it. Fails (and expunges the pool record) when no host is up, OCFS2 node
+     * preparation fails, or no host can access the pool.
+     */
+    @Override
+    public boolean attachCluster(DataStore store, ClusterScope scope) {
+        PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
+        // Check if there is host up in this cluster
+        List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(
+                Host.Type.Routing, primarystore.getClusterId(),
+                primarystore.getPodId(), primarystore.getDataCenterId());
+        if (allHosts.isEmpty()) {
+            throw new CloudRuntimeException(
+                    "No host up to associate a storage pool with in cluster "
+                            + primarystore.getClusterId());
+        }
+
+        if (primarystore.getPoolType() == StoragePoolType.OCFS2
+                && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
+            s_logger.warn("Can not create storage pool " + primarystore
+                    + " on cluster " + primarystore.getClusterId());
+            primaryDataStoreDao.expunge(primarystore.getId());
+            return false;
+        }
+
+        // Only one host needs to physically create the pool; stop at the
+        // first success. NOTE(review): if every host fails, createStoragePool
+        // throws, so 'success' being false here is not checked afterwards.
+        boolean success = false;
+        for (HostVO h : allHosts) {
+            success = createStoragePool(h.getId(), primarystore);
+            if (success) {
+                break;
+            }
+        }
+
+        s_logger.debug("In createPool Adding the pool to each of the hosts");
+        List<HostVO> poolHosts = new ArrayList<HostVO>();
+        for (HostVO h : allHosts) {
+            try {
+                this.storageMgr.connectHostToSharedPool(h.getId(),
+                        primarystore.getId());
+                poolHosts.add(h);
+            } catch (Exception e) {
+                // Best-effort per host: one unreachable host must not abort
+                // the attach as long as at least one host connects.
+                s_logger.warn("Unable to establish a connection between " + h
+                        + " and " + primarystore, e);
+            }
+        }
+
+        if (poolHosts.isEmpty()) {
+            s_logger.warn("No host can access storage pool " + primarystore
+                    + " on cluster " + primarystore.getClusterId());
+            primaryDataStoreDao.expunge(primarystore.getId());
+            return false;
+        }
+        
+        this.dataStoreHelper.attachCluster(store);
+        return true;
+    }
+
+    /**
+     * Attaches a zone-wide pool: connects every up-and-enabled KVM host in the
+     * zone to the pool (best-effort per host), then marks the store attached.
+     * NOTE(review): hard-coded to HypervisorType.KVM — other hypervisors are
+     * not connected by this path; confirm that is intentional.
+     */
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope) {
+    	List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
+    	for (HostVO host : hosts) {
+    		try {
+    			this.storageMgr.connectHostToSharedPool(host.getId(),
+    					dataStore.getId());
+    		} catch (Exception e) {
+    			s_logger.warn("Unable to establish a connection between " + host
+    					+ " and " + dataStore, e);
+    		}
+    	}
+    	this.dataStoreHelper.attachZone(dataStore);
+        return true;
+    }
+
+    /** Not implemented; always returns false. */
+    @Override
+    public boolean dettach() {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    /** Not implemented; always returns false. */
+    @Override
+    public boolean unmanaged() {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    /** Puts the pool into maintenance via the automation service, then
+     *  updates the store record. */
+    @Override
+    public boolean maintain(DataStore dataStore) {
+        storagePoolAutmation.maintain(dataStore);
+        this.dataStoreHelper.maintain(dataStore);
+        return true;
+    }
+
+    /** Reverses maintenance: record update first, then automation cancel —
+     *  the mirror order of {@link #maintain}. */
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        this.dataStoreHelper.cancelMaintain(store);
+        storagePoolAutmation.cancelMaintain(store);
+        return true;
+    }
+
+    /**
+     * Deletes the pool: asks each host that has the pool mounted to delete it
+     * (first success wins), then removes the DB record.
+     *
+     * @throws CloudRuntimeException if no host succeeds in deleting the pool
+     */
+    @DB
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        List<StoragePoolHostVO> hostPoolRecords = this._storagePoolHostDao
+                .listByPoolId(store.getId());
+        StoragePool pool = (StoragePool)store;
+        boolean deleteFlag = false;
+        // Remove the SR associated with the Xenserver
+        for (StoragePoolHostVO host : hostPoolRecords) {
+            DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(
+                    pool);
+            final Answer answer = agentMgr.easySend(host.getHostId(),
+                    deleteCmd);
+
+            if (answer != null && answer.getResult()) {
+                deleteFlag = true;
+                break;
+            } else {
+                if (answer != null) {
+                    s_logger.debug("Failed to delete storage pool: " + answer.getResult());
+                }
+            }
+        }
+        
+        if (!deleteFlag) {
+            throw new CloudRuntimeException("Failed to delete storage pool on host");
+        }
+        
+        this.dataStoreHelper.deletePrimaryDataStore(store);
+        // NOTE(review): returns false even on the success path — confirm
+        // callers ignore this return value or fix to return true.
+        return false;
+    }
+
+    /** Attaches the store to a single host scope (local storage path);
+     *  delegates entirely to the helper. */
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        this.dataStoreHelper.attachHost(store, scope, existingInfo);
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java
new file mode 100644
index 0000000..4d46d99
--- /dev/null
+++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.provider;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.datastore.driver.CloudStackPrimaryDataStoreDriverImpl;
+import org.apache.cloudstack.storage.datastore.lifecycle.CloudStackPrimaryDataStoreLifeCycleImpl;
+
+import com.cloud.utils.component.ComponentContext;
+
+/**
+ * Default primary data store provider ("ancient primary data store provider").
+ * Lazily wires its lifecycle, driver, and host-listener collaborators in
+ * {@link #configure(Map)} via {@link ComponentContext} injection.
+ */
+public class CloudStackPrimaryDataStoreProviderImpl implements
+        PrimaryDataStoreProvider {
+
+    // Provider name used by the storage manager to look this provider up.
+    private final String providerName = "ancient primary data store provider";
+    protected PrimaryDataStoreDriver driver;
+    protected HypervisorHostListener listener;
+    // NOTE(review): field name has a typo ("lifecyle"); internal-only.
+    protected DataStoreLifeCycle lifecyle;
+
+    // Package-private constructor: instances are created by the component
+    // framework, not by arbitrary callers.
+    CloudStackPrimaryDataStoreProviderImpl() {
+        
+    }
+    
+    @Override
+    public String getName() {
+        return providerName;
+    }
+
+    /** @return the lifecycle created in {@link #configure(Map)}; null before
+     *          configure() has run. */
+    @Override
+    public DataStoreLifeCycle getDataStoreLifeCycle() {
+        return this.lifecyle;
+    }
+
+    /**
+     * Instantiates and injects the three collaborators. The params map is
+     * ignored here. NOTE(review): DefaultHostListener is not imported in this
+     * hunk — presumably it lives in the same package; verify.
+     */
+    @Override
+    public boolean configure(Map<String, Object> params) {
+        lifecyle = ComponentContext.inject(CloudStackPrimaryDataStoreLifeCycleImpl.class);
+        driver = ComponentContext.inject(CloudStackPrimaryDataStoreDriverImpl.class);
+        listener = ComponentContext.inject(DefaultHostListener.class);
+        return true;
+    }
+
+    /** @return the driver created in {@link #configure(Map)}; null before
+     *          configure() has run. */
+    @Override
+    public PrimaryDataStoreDriver getDataStoreDriver() {
+        return this.driver;
+    }
+
+    /** @return the host listener created in {@link #configure(Map)}; null
+     *          before configure() has run. */
+    @Override
+    public HypervisorHostListener getHostListener() {
+        return this.listener;
+    }
+    
+    /** This provider serves primary storage only. NOTE(review):
+     *  DataStoreProviderType is unqualified and unimported here — presumably
+     *  a type nested in the provider interface; verify. */
+    @Override
+    public Set<DataStoreProviderType> getTypes() {
+        Set<DataStoreProviderType> types =  new HashSet<DataStoreProviderType>();
+        types.add(DataStoreProviderType.PRIMARY);
+        return types;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
index 91c446f..dc29fb8 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
@@ -26,10 +26,10 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
 import org.apache.cloudstack.storage.command.CreateObjectAnswer;
 import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/server/src/com/cloud/server/ManagementServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java
index 191157a..efdee20 100755
--- a/server/src/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/com/cloud/server/ManagementServerImpl.java
@@ -2242,6 +2242,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(DeleteAlertsCmd.class);
         cmdList.add(ArchiveEventsCmd.class);
         cmdList.add(DeleteEventsCmd.class);
+        cmdList.add(ListStorageProvidersCmd.class);
         return cmdList;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index b0a1da1..f37654b 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -712,7 +712,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 }
             }
             DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
-            DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
+            DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
             if (pool == null) {
                 Map<String, Object> params = new HashMap<String, Object>();
                 String name = (host.getName() + " Local Storage");
@@ -724,7 +724,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 params.put("localStorage", true);
                 params.put("details", pInfo.getDetails());
                 params.put("uuid", pInfo.getUuid());
-                params.put("providerId", provider.getId());
+                params.put("providerName", provider.getName());
                 
                 store = lifeCycle.initialize(params);
             } else {
@@ -748,15 +748,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd)
             throws ResourceInUseException, IllegalArgumentException,
             UnknownHostException, ResourceUnavailableException {
-        String providerUuid = cmd.getStorageProviderUuid();
+        String providerName = cmd.getStorageProviderName();
         DataStoreProvider storeProvider = dataStoreProviderMgr
-                .getDataStoreProviderByUuid(providerUuid);
+                .getDataStoreProvider(providerName);
 
         if (storeProvider == null) {
             storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
             if (storeProvider == null) {
             throw new InvalidParameterValueException(
-                    "can't find storage provider: " + providerUuid);
+                    "can't find storage provider: " + providerName);
             }
         }
 
@@ -821,9 +821,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         params.put("tags", cmd.getTags());
         params.put("name", cmd.getStoragePoolName());
         params.put("details", details);
-        params.put("providerId", storeProvider.getId());
+        params.put("providerName", storeProvider.getName());
 
-        DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle();
+        DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
         DataStore store = null;
         try {
             store = lifeCycle.initialize(params);
@@ -948,9 +948,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         s_logger.trace("Released lock for storage pool " + id);
 
         DataStoreProvider storeProvider = dataStoreProviderMgr
-                .getDataStoreProviderById(sPool.getStorageProviderId());
-        DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle();
-        lifeCycle.deleteDataStore(id);
+                .getDataStoreProvider(sPool.getStorageProviderName());
+        DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
+        DataStore store = dataStoreMgr.getDataStore(
+                sPool.getId(), DataStoreRole.Primary);
+        lifeCycle.deleteDataStore(store);
 
         return false;
     }
@@ -963,8 +965,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         s_logger.debug("Adding pool " + pool.getName() + " to  host " + hostId);
 
         DataStoreProvider provider = dataStoreProviderMgr
-                .getDataStoreProviderById(pool.getStorageProviderId());
-        HypervisorHostListener listener = hostListeners.get(provider.getUuid());
+                .getDataStoreProvider(pool.getStorageProviderName());
+        HypervisorHostListener listener = hostListeners.get(provider.getName());
         listener.hostConnect(hostId, pool.getId());
     }
 
@@ -1415,19 +1417,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         }
 
         DataStoreProvider provider = dataStoreProviderMgr
-                .getDataStoreProviderById(primaryStorage.getStorageProviderId());
-        DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
-        lifeCycle.maintain(primaryStorage.getId());
+                .getDataStoreProvider(primaryStorage.getStorageProviderName());
+        DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
+        DataStore store = dataStoreMgr.getDataStore(
+                primaryStorage.getId(), DataStoreRole.Primary);
+        lifeCycle.maintain(store);
 
         return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
                 primaryStorage.getId(), DataStoreRole.Primary);
     }
 
-    private void setPoolStateToError(StoragePoolVO primaryStorage) {
-        primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance);
-        _storagePoolDao.update(primaryStorage.getId(), primaryStorage);
-    }
-
     @Override
     @DB
     public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(
@@ -1457,29 +1456,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         }
 
         DataStoreProvider provider = dataStoreProviderMgr
-                .getDataStoreProviderById(primaryStorage.getStorageProviderId());
-        DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
-        lifeCycle.cancelMaintain(primaryStorage.getId());
+                .getDataStoreProvider(primaryStorage.getStorageProviderName());
+        DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
+        DataStore store = dataStoreMgr.getDataStore(
+                primaryStorage.getId(), DataStoreRole.Primary);
+        lifeCycle.cancelMaintain(store);
+        
         return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
                 primaryStorage.getId(), DataStoreRole.Primary);
     }
 
-    private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO,
-            Command cmd) {
-        ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO
-                .getClusterId());
-        if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster
-                .getHypervisorType() == HypervisorType.VMware)
-                && ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) {
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    
-   
-    
     protected class StorageGarbageCollector implements Runnable {
 
         public StorageGarbageCollector() {
@@ -1845,9 +1831,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
 
 
     @Override
-    public synchronized boolean registerHostListener(String providerUuid,
+    public synchronized boolean registerHostListener(String providerName,
             HypervisorHostListener listener) {
-        hostListeners.put(providerUuid, listener);
+        hostListeners.put(providerName, listener);
         return true;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/server/src/com/cloud/storage/StoragePoolAutomation.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StoragePoolAutomation.java b/server/src/com/cloud/storage/StoragePoolAutomation.java
new file mode 100644
index 0000000..e8eb9b7
--- /dev/null
+++ b/server/src/com/cloud/storage/StoragePoolAutomation.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.storage;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+
+public interface StoragePoolAutomation {
+    public boolean maintain(DataStore store);
+    public boolean cancelMaintain(DataStore store);
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
new file mode 100644
index 0000000..9bba979
--- /dev/null
+++ b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.storage;
+
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.alert.AlertManager;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.resource.ResourceManager;
+import com.cloud.server.ManagementServer;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.dao.StoragePoolWorkDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.user.Account;
+import com.cloud.user.User;
+import com.cloud.user.UserContext;
+import com.cloud.user.dao.UserDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.exception.ExecutionException;
+import com.cloud.vm.ConsoleProxyVO;
+import com.cloud.vm.DomainRouterVO;
+import com.cloud.vm.SecondaryStorageVmVO;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachine.State;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.ConsoleProxyDao;
+import com.cloud.vm.dao.DomainRouterDao;
+import com.cloud.vm.dao.SecondaryStorageVmDao;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@Component
+public class StoragePoolAutomationImpl implements StoragePoolAutomation {
+    private static final Logger s_logger = Logger
+            .getLogger(StoragePoolAutomationImpl.class);
+    @Inject
+    protected VirtualMachineManager vmMgr;
+    @Inject
+    protected SecondaryStorageVmDao _secStrgDao;
+    @Inject
+    UserVmDao userVmDao;
+    @Inject
+    protected UserDao _userDao;
+    @Inject
+    protected DomainRouterDao _domrDao;
+    @Inject
+    protected StoragePoolHostDao _storagePoolHostDao;
+    @Inject
+    protected AlertManager _alertMgr;
+    @Inject
+    protected ConsoleProxyDao _consoleProxyDao;
+
+    @Inject
+    protected StoragePoolWorkDao _storagePoolWorkDao;
+    @Inject
+    PrimaryDataStoreDao primaryDataStoreDao;
+    @Inject
+    DataStoreManager dataStoreMgr;
+    @Inject
+    protected ResourceManager _resourceMgr;
+    @Inject
+    AgentManager agentMgr;
+    @Inject
+    VolumeDao volumeDao;
+    @Inject
+    VMInstanceDao vmDao;
+    @Inject
+    ManagementServer server;
+    @Inject DataStoreProviderManager providerMgr;
+    
+    @Override
+    public boolean maintain(DataStore store) {
+        Long userId = UserContext.current().getCallerUserId();
+        User user = _userDao.findById(userId);
+        Account account = UserContext.current().getCaller();
+        StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
+        try {
+            StoragePool storagePool = (StoragePool) store;
+            List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
+                    pool.getClusterId(), Status.Up);
+            if (hosts == null || hosts.size() == 0) {
+                pool.setStatus(StoragePoolStatus.Maintenance);
+                primaryDataStoreDao.update(pool.getId(), pool);
+                return true;
+            } else {
+                // set the pool state to prepare for maintenance
+                pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
+                primaryDataStoreDao.update(pool.getId(), pool);
+            }
+            // remove heartbeat
+            for (HostVO host : hosts) {
+                ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
+                        false, storagePool);
+                final Answer answer = agentMgr.easySend(host.getId(), cmd);
+                if (answer == null || !answer.getResult()) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("ModifyStoragePool false failed due to "
+                                + ((answer == null) ? "answer null" : answer
+                                        .getDetails()));
+                    }
+                } else {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("ModifyStoragePool false secceeded");
+                    }
+                }
+            }
+            // check to see if other ps exist
+            // if they do, then we can migrate over the system vms to them
+            // if they dont, then just stop all vms on this one
+            List<StoragePoolVO> upPools = primaryDataStoreDao
+                    .listByStatusInZone(pool.getDataCenterId(),
+                            StoragePoolStatus.Up);
+            boolean restart = true;
+            if (upPools == null || upPools.size() == 0) {
+                restart = false;
+            }
+
+            // 2. Get a list of all the ROOT volumes within this storage pool
+            List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
+                    .getId());
+
+            // 3. Enqueue to the work queue
+            for (VolumeVO volume : allVolumes) {
+                VMInstanceVO vmInstance = vmDao
+                        .findById(volume.getInstanceId());
+
+                if (vmInstance == null) {
+                    continue;
+                }
+
+                // enqueue sp work
+                if (vmInstance.getState().equals(State.Running)
+                        || vmInstance.getState().equals(State.Starting)
+                        || vmInstance.getState().equals(State.Stopping)) {
+
+                    try {
+                        StoragePoolWorkVO work = new StoragePoolWorkVO(
+                                vmInstance.getId(), pool.getId(), false, false,
+                                server.getId());
+                        _storagePoolWorkDao.persist(work);
+                    } catch (Exception e) {
+                        if (s_logger.isDebugEnabled()) {
+                            s_logger.debug("Work record already exists, re-using by re-setting values");
+                        }
+                        StoragePoolWorkVO work = _storagePoolWorkDao
+                                .findByPoolIdAndVmId(pool.getId(),
+                                        vmInstance.getId());
+                        work.setStartedAfterMaintenance(false);
+                        work.setStoppedForMaintenance(false);
+                        work.setManagementServerId(server.getId());
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+            }
+
+            // 4. Process the queue
+            List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
+                    .listPendingWorkForPrepareForMaintenanceByPoolId(pool
+                            .getId());
+
+            for (StoragePoolWorkVO work : pendingWork) {
+                // shut down the running vms
+                VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
+
+                if (vmInstance == null) {
+                    continue;
+                }
+
+                // if the instance is of type consoleproxy, call the console
+                // proxy
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.ConsoleProxy)) {
+                    // call the consoleproxymanager
+                    ConsoleProxyVO consoleProxy = _consoleProxyDao
+                            .findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
+                        String errorMsg = "There was an error stopping the console proxy id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+
+                        if (this.vmMgr.advanceStart(consoleProxy, null, user,
+                                account) == null) {
+                            String errorMsg = "There was an error starting the console proxy id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+
+                // if the instance is of type uservm, call the user vm manager
+                if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
+                    UserVmVO userVm = userVmDao.findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(userVm, true, user, account)) {
+                        String errorMsg = "There was an error stopping the user vm id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type secondary storage vm, call the
+                // secondary storage vm manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.SecondaryStorageVm)) {
+                    SecondaryStorageVmVO secStrgVm = _secStrgDao
+                            .findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
+                        String errorMsg = "There was an error stopping the ssvm id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+                        if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) {
+                            String errorMsg = "There was an error starting the ssvm id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+
+                // if the instance is of type domain router vm, call the network
+                // manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.DomainRouter)) {
+                    DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(domR, true, user, account)) {
+                        String errorMsg = "There was an error stopping the domain router id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable primary storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+                        if (vmMgr.advanceStart(domR, null, user, account) == null) {
+                            String errorMsg = "There was an error starting the domain router id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+            }
+            
+        } catch(Exception e) {
+            s_logger.error(
+                    "Exception in enabling primary storage maintenance:", e);
+            pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
+            this.primaryDataStoreDao.update(pool.getId(), pool);
+            throw new CloudRuntimeException(e.getMessage());
+        }
+        return true;
+    }
+
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        // Change the storage state back to up
+        Long userId = UserContext.current().getCallerUserId();
+        User user = _userDao.findById(userId);
+        Account account = UserContext.current().getCaller();
+        StoragePoolVO poolVO = this.primaryDataStoreDao
+                .findById(store.getId());
+        StoragePool pool = (StoragePool)store;
+       
+        List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
+                pool.getClusterId(), Status.Up);
+        if (hosts == null || hosts.size() == 0) {
+            return true;
+        }
+        // add heartbeat
+        for (HostVO host : hosts) {
+            ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
+                    true, pool);
+            final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
+            if (answer == null || !answer.getResult()) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("ModifyStoragePool add failed due to "
+                            + ((answer == null) ? "answer null" : answer
+                                    .getDetails()));
+                }
+            } else {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("ModifyStoragePool add secceeded");
+                }
+            }
+        }
+
+        // 2. Get a list of pending work for this queue
+        List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
+                .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
+
+        // 3. work through the queue
+        for (StoragePoolWorkVO work : pendingWork) {
+            try {
+                VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
+
+                if (vmInstance == null) {
+                    continue;
+                }
+
+                // if the instance is of type consoleproxy, call the console
+                // proxy
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.ConsoleProxy)) {
+
+                    ConsoleProxyVO consoleProxy = _consoleProxyDao
+                            .findById(vmInstance.getId());
+                    if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) {
+                        String msg = "There was an error starting the console proxy id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type ssvm, call the ssvm manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.SecondaryStorageVm)) {
+                    SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
+                            .getId());
+                    if (vmMgr.advanceStart(ssVm, null, user, account) == null) {
+                        String msg = "There was an error starting the ssvm id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type ssvm, call the ssvm manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.DomainRouter)) {
+                    DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
+                    if (vmMgr.advanceStart(domR, null, user, account) == null) {
+                        String msg = "There was an error starting the domR id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type user vm, call the user vm manager
+                if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
+                    UserVmVO userVm = userVmDao.findById(vmInstance.getId());
+
+                    if (vmMgr.advanceStart(userVm, null, user, account) == null) {
+
+                        String msg = "There was an error starting the user vm id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+                return true;
+            } catch (Exception e) {
+                s_logger.debug("Failed start vm", e);
+                throw new CloudRuntimeException(e.toString());
+            }
+        }
+        return false;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/setup/db/db/schema-410to420.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql
index 4e39a71..eb650cc 100644
--- a/setup/db/db/schema-410to420.sql
+++ b/setup/db/db/schema-410to420.sql
@@ -29,6 +29,7 @@ DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max';
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen');
 ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager';
 
+alter table storage_pool change storage_provider_id storage_provider_name varchar(255);
 alter table template_host_ref add state varchar(255);
 alter table template_host_ref add update_count bigint unsigned;
 alter table template_host_ref add updated datetime;
@@ -70,13 +71,12 @@ CREATE TABLE `cloud`.`data_store_provider` (
 CREATE TABLE `cloud`.`image_data_store` (
   `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
   `name` varchar(255) NOT NULL COMMENT 'name of data store',
-  `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider',
+  `image_provider_name` varchar(255) NOT NULL COMMENT 'id of image_data_store_provider',
   `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store',
   `data_center_id` bigint unsigned  COMMENT 'datacenter id of data store',
   `scope` varchar(255) COMMENT 'scope of data store',
   `uuid` varchar(255) COMMENT 'uuid of data store',
-  PRIMARY KEY(`id`),
-  CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`)
+  PRIMARY KEY(`id`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
 ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned;

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/3ed6200e/tools/apidoc/gen_toc.py
----------------------------------------------------------------------
diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py
index 1fe5e16..ab2456d 100644
--- a/tools/apidoc/gen_toc.py
+++ b/tools/apidoc/gen_toc.py
@@ -95,6 +95,7 @@ known_categories = {
     'InstanceGroup': 'VM Group',
     'StorageMaintenance': 'Storage Pool',
     'StoragePool': 'Storage Pool',
+    'StorageProvider': 'Storage Pool',
     'SecurityGroup': 'Security Group',
     'SSH': 'SSH',
     'register': 'Registration',


Mime
View raw message