incubator-cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From edi...@apache.org
Subject [33/50] [abbrv] squash changes into one giant patch
Date Wed, 20 Feb 2013 21:42:43 GMT
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
new file mode 100644
index 0000000..3ce14ee
--- /dev/null
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
@@ -0,0 +1,952 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
+import com.cloud.capacity.Capacity;
+import com.cloud.capacity.CapacityVO;
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.exception.DiscoveryException;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.resource.ResourceManager;
+import com.cloud.server.ManagementServer;
+import com.cloud.storage.OCFS2Manager;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolDiscoverer;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.StoragePoolStatus;
+import com.cloud.storage.StoragePoolWorkVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.dao.StoragePoolWorkDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.user.Account;
+import com.cloud.user.User;
+import com.cloud.user.UserContext;
+import com.cloud.user.dao.UserDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.UriUtils;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.exception.ExecutionException;
+import com.cloud.vm.ConsoleProxyVO;
+import com.cloud.vm.DomainRouterVO;
+import com.cloud.vm.SecondaryStorageVmVO;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachine.State;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.ConsoleProxyDao;
+import com.cloud.vm.dao.DomainRouterDao;
+import com.cloud.vm.dao.SecondaryStorageVmDao;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
/**
 * Legacy ("ancient") primary-storage lifecycle implementation carried over
 * from the old storage-manager code path. Handles creation ({@code
 * initialize}), cluster attachment and maintenance-mode transitions for
 * primary storage pools.
 *
 * NOTE(review): the class name is missing an 'e' ("LifeCycl"); it matches
 * the file name, so renaming would touch callers -- flagging only.
 */
public class AncientPrimaryDataStoreLifeCyclImpl implements
        PrimaryDataStoreLifeCycle {
    private static final Logger s_logger = Logger
            .getLogger(AncientPrimaryDataStoreLifeCyclImpl.class);
    // Host/cluster inventory; used to find hosts to attach pools to.
    @Inject
    protected ResourceManager _resourceMgr;
    // iSCSI pool discoverers, iterated when an iscsi URL carries no LUN.
    // NOTE(review): not @Inject-annotated -- presumably populated by the
    // container another way; confirm wiring.
    protected List<StoragePoolDiscoverer> _discoverers;
    @Inject
    PrimaryDataStoreDao primaryDataStoreDao;
    // Prepares cluster nodes before an OCFS2 pool becomes usable.
    @Inject
    protected OCFS2Manager _ocfs2Mgr;
    @Inject
    DataStoreManager dataStoreMgr;
    // Sends commands (create/modify/delete pool) to host agents.
    @Inject
    AgentManager agentMgr;
    @Inject
    StorageManager storageMgr;
    @Inject
    protected CapacityDao _capacityDao;

    @Inject
    VolumeDao volumeDao;
    @Inject
    VMInstanceDao vmDao;
    @Inject
    ManagementServer server;
    // Stops/starts VMs during maintenance transitions.
    @Inject
    protected VirtualMachineManager vmMgr;
    @Inject
    protected SecondaryStorageVmDao _secStrgDao;
    @Inject
    UserVmDao userVmDao;
    @Inject
    protected UserDao _userDao;
    @Inject
    protected DomainRouterDao _domrDao;
    @Inject
    protected StoragePoolHostDao _storagePoolHostDao;
    @Inject
    protected AlertManager _alertMgr;



    @Inject
    protected ConsoleProxyDao _consoleProxyDao;

    // Work queue recording which VMs were stopped/restarted for maintenance.
    @Inject
    protected StoragePoolWorkDao _storagePoolWorkDao;

+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        String url = (String) dsInfos.get("url");
+        Long providerId = (Long)dsInfos.get("providerId");
+        if (clusterId != null && podId == null) {
+            throw new InvalidParameterValueException(
+                    "Cluster id requires pod id");
+        }
+
+        URI uri = null;
+        try {
+            uri = new URI(UriUtils.encodeURIComponent(url));
+            if (uri.getScheme() == null) {
+                throw new InvalidParameterValueException("scheme is null "
+                        + url + ", add nfs:// as a prefix");
+            } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
+                String uriHost = uri.getHost();
+                String uriPath = uri.getPath();
+                if (uriHost == null || uriPath == null
+                        || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
+                    throw new InvalidParameterValueException(
+                            "host or path is null, should be nfs://hostname/path");
+                }
+            } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
+                String uriPath = uri.getPath();
+                if (uriPath == null) {
+                    throw new InvalidParameterValueException(
+                            "host or path is null, should be sharedmountpoint://localhost/path");
+                }
+            } else if (uri.getScheme().equalsIgnoreCase("rbd")) {
+                String uriPath = uri.getPath();
+                if (uriPath == null) {
+                    throw new InvalidParameterValueException(
+                            "host or path is null, should be rbd://hostname/pool");
+                }
+            }
+        } catch (URISyntaxException e) {
+            throw new InvalidParameterValueException(url
+                    + " is not a valid uri");
+        }
+
+        String tags = (String) dsInfos.get("tags");
+        Map<String, String> details = (Map<String, String>) dsInfos
+                .get("details");
+        if (tags != null) {
+            String[] tokens = tags.split(",");
+
+            for (String tag : tokens) {
+                tag = tag.trim();
+                if (tag.length() == 0) {
+                    continue;
+                }
+                details.put(tag, "true");
+            }
+        }
+
+        String scheme = uri.getScheme();
+        String storageHost = uri.getHost();
+        String hostPath = uri.getPath().replaceFirst("/", "");
+        String userInfo = uri.getUserInfo();
+        int port = uri.getPort();
+        StoragePoolVO pool = null;
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("createPool Params @ scheme - " + scheme
+                    + " storageHost - " + storageHost + " hostPath - "
+                    + hostPath + " port - " + port);
+        }
+        if (scheme.equalsIgnoreCase("nfs")) {
+            if (port == -1) {
+                port = 2049;
+            }
+            pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem,
+                    storageHost, port, hostPath);
+            if (clusterId == null) {
+                throw new IllegalArgumentException(
+                        "NFS need to have clusters specified for XenServers");
+            }
+        } else if (scheme.equalsIgnoreCase("file")) {
+            if (port == -1) {
+                port = 0;
+            }
+            pool = new StoragePoolVO(StoragePoolType.Filesystem,
+                    "localhost", 0, hostPath);
+        } else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
+            pool = new StoragePoolVO(StoragePoolType.SharedMountPoint,
+                    storageHost, 0, hostPath);
+        } else if (scheme.equalsIgnoreCase("clvm")) {
+            pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0,
+                    hostPath.replaceFirst("/", ""));
+        } else if (scheme.equalsIgnoreCase("rbd")) {
+            if (port == -1) {
+                port = 6789;
+            }
+            pool = new StoragePoolVO(StoragePoolType.RBD, storageHost,
+                    port, hostPath.replaceFirst("/", ""));
+            pool.setUserInfo(userInfo);
+        } else if (scheme.equalsIgnoreCase("PreSetup")) {
+            pool = new StoragePoolVO(StoragePoolType.PreSetup,
+                    storageHost, 0, hostPath);
+        } else if (scheme.equalsIgnoreCase("iscsi")) {
+            String[] tokens = hostPath.split("/");
+            int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
+            if (port == -1) {
+                port = 3260;
+            }
+            if (lun != -1) {
+                if (clusterId == null) {
+                    throw new IllegalArgumentException(
+                            "IscsiLUN need to have clusters specified");
+                }
+                hostPath.replaceFirst("/", "");
+                pool = new StoragePoolVO(StoragePoolType.IscsiLUN,
+                        storageHost, port, hostPath);
+            } else {
+                for (StoragePoolDiscoverer discoverer : _discoverers) {
+                    Map<StoragePoolVO, Map<String, String>> pools;
+                    try {
+                        pools = discoverer.find(zoneId, podId, uri, details);
+                    } catch (DiscoveryException e) {
+                        throw new IllegalArgumentException(
+                                "Not enough information for discovery " + uri,
+                                e);
+                    }
+                    if (pools != null) {
+                        Map.Entry<StoragePoolVO, Map<String, String>> entry = pools
+                                .entrySet().iterator().next();
+                        pool = entry.getKey();
+                        details = entry.getValue();
+                        break;
+                    }
+                }
+            }
+        } else if (scheme.equalsIgnoreCase("iso")) {
+            if (port == -1) {
+                port = 2049;
+            }
+            pool = new StoragePoolVO(StoragePoolType.ISO, storageHost,
+                    port, hostPath);
+        } else if (scheme.equalsIgnoreCase("vmfs")) {
+            pool = new StoragePoolVO(StoragePoolType.VMFS,
+                    "VMFS datastore: " + hostPath, 0, hostPath);
+        } else if (scheme.equalsIgnoreCase("ocfs2")) {
+            port = 7777;
+            pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered",
+                    port, hostPath);
+        } else {
+            StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme);
+                
+            if (type != null) {
+                pool = new StoragePoolVO(type, storageHost,
+                        0, hostPath);
+            } else {
+            s_logger.warn("Unable to figure out the scheme for URI: " + uri);
+            throw new IllegalArgumentException(
+                    "Unable to figure out the scheme for URI: " + uri);
+            }
+        }
+
+        if (pool == null) {
+            s_logger.warn("Unable to figure out the scheme for URI: " + uri);
+            throw new IllegalArgumentException(
+                    "Unable to figure out the scheme for URI: " + uri);
+        }
+
+        Object localStorage = dsInfos.get("localStorage");
+        if (localStorage == null) {
+            List<StoragePoolVO> pools = primaryDataStoreDao
+                    .listPoolByHostPath(storageHost, hostPath);
+            if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
+                Long oldPodId = pools.get(0).getPodId();
+                throw new CloudRuntimeException("Storage pool " + uri
+                        + " already in use by another pod (id=" + oldPodId + ")");
+            }
+        }
+
+        long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id");
+        Object existingUuid = dsInfos.get("uuid");
+        String uuid = null;
+
+        if (existingUuid != null) {
+            uuid = (String)existingUuid;
+        } else if (scheme.equalsIgnoreCase("sharedmountpoint")
+                || scheme.equalsIgnoreCase("clvm")) {
+            uuid = UUID.randomUUID().toString();
+        } else if (scheme.equalsIgnoreCase("PreSetup")) {
+            uuid = hostPath.replace("/", "");
+        } else {
+            uuid = UUID.nameUUIDFromBytes(
+                    new String(storageHost + hostPath).getBytes()).toString();
+        }
+
+        List<StoragePoolVO> spHandles = primaryDataStoreDao
+                .findIfDuplicatePoolsExistByUUID(uuid);
+        if ((spHandles != null) && (spHandles.size() > 0)) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Another active pool with the same uuid already exists");
+            }
+            throw new CloudRuntimeException(
+                    "Another active pool with the same uuid already exists");
+        }
+
+        String poolName = (String) dsInfos.get("name");
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("In createPool Setting poolId - " + poolId
+                    + " uuid - " + uuid + " zoneId - " + zoneId + " podId - "
+                    + podId + " poolName - " + poolName);
+        }
+
+        pool.setId(poolId);
+        pool.setUuid(uuid);
+        pool.setDataCenterId(zoneId);
+        pool.setPodId(podId);
+        pool.setName(poolName);
+        pool.setClusterId(clusterId);
+        pool.setStorageProviderId(providerId);
+        pool.setStatus(StoragePoolStatus.Initialized);
+        pool = primaryDataStoreDao.persist(pool, details);
+
+        return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
+    }
+
+    protected boolean createStoragePool(long hostId, StoragePool pool) {
+        s_logger.debug("creating pool " + pool.getName() + " on  host "
+                + hostId);
+        if (pool.getPoolType() != StoragePoolType.NetworkFilesystem
+                && pool.getPoolType() != StoragePoolType.Filesystem
+                && pool.getPoolType() != StoragePoolType.IscsiLUN
+                && pool.getPoolType() != StoragePoolType.Iscsi
+                && pool.getPoolType() != StoragePoolType.VMFS
+                && pool.getPoolType() != StoragePoolType.SharedMountPoint
+                && pool.getPoolType() != StoragePoolType.PreSetup
+                && pool.getPoolType() != StoragePoolType.OCFS2
+                && pool.getPoolType() != StoragePoolType.RBD
+                && pool.getPoolType() != StoragePoolType.CLVM) {
+            s_logger.warn(" Doesn't support storage pool type "
+                    + pool.getPoolType());
+            return false;
+        }
+        CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
+        final Answer answer = agentMgr.easySend(hostId, cmd);
+        if (answer != null && answer.getResult()) {
+            return true;
+        } else {
+            primaryDataStoreDao.expunge(pool.getId());
+            String msg = "";
+            if (answer != null) {
+                msg = "Can not create storage pool through host " + hostId
+                        + " due to " + answer.getDetails();
+                s_logger.warn(msg);
+            } else {
+                msg = "Can not create storage pool through host " + hostId
+                        + " due to CreateStoragePoolCommand returns null";
+                s_logger.warn(msg);
+            }
+            throw new CloudRuntimeException(msg);
+        }
+    }
+
+    @Override
+    public boolean attachCluster(DataStore store, ClusterScope scope) {
+        PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
+        // Check if there is host up in this cluster
+        List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(
+                Host.Type.Routing, primarystore.getClusterId(),
+                primarystore.getPodId(), primarystore.getDataCenterId());
+        if (allHosts.isEmpty()) {
+            throw new CloudRuntimeException(
+                    "No host up to associate a storage pool with in cluster "
+                            + primarystore.getClusterId());
+        }
+
+        if (primarystore.getPoolType() == StoragePoolType.OCFS2
+                && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
+            s_logger.warn("Can not create storage pool " + primarystore
+                    + " on cluster " + primarystore.getClusterId());
+            primaryDataStoreDao.expunge(primarystore.getId());
+            return false;
+        }
+
+        boolean success = false;
+        for (HostVO h : allHosts) {
+            success = createStoragePool(h.getId(), primarystore);
+            if (success) {
+                break;
+            }
+        }
+
+        s_logger.debug("In createPool Adding the pool to each of the hosts");
+        List<HostVO> poolHosts = new ArrayList<HostVO>();
+        for (HostVO h : allHosts) {
+            try {
+                this.storageMgr.connectHostToSharedPool(h.getId(),
+                        primarystore.getId());
+                poolHosts.add(h);
+            } catch (Exception e) {
+                s_logger.warn("Unable to establish a connection between " + h
+                        + " and " + primarystore, e);
+            }
+        }
+
+        if (poolHosts.isEmpty()) {
+            s_logger.warn("No host can access storage pool " + primarystore
+                    + " on cluster " + primarystore.getClusterId());
+            primaryDataStoreDao.expunge(primarystore.getId());
+            return false;
+        } else {
+            storageMgr.createCapacityEntry(primarystore.getId());
+        }
+        StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
+        pool.setScope(ScopeType.CLUSTER);
+        pool.setStatus(StoragePoolStatus.Up);
+        this.primaryDataStoreDao.update(pool.getId(), pool);
+        return true;
+    }
+
    /**
     * Zone-wide attachment is not implemented in this legacy lifecycle;
     * always returns {@code false}.
     */
    @Override
    public boolean attachZone(DataStore dataStore, ZoneScope scope) {
        // TODO Auto-generated method stub
        return false;
    }
+
    /**
     * Detach is not implemented in this legacy lifecycle; always returns
     * {@code false}. (Spelling "dettach" comes from the interface.)
     */
    @Override
    public boolean dettach() {
        // TODO Auto-generated method stub
        return false;
    }
+
    /**
     * Unmanage is not implemented in this legacy lifecycle; always returns
     * {@code false}.
     */
    @Override
    public boolean unmanaged() {
        // TODO Auto-generated method stub
        return false;
    }
+
+    @Override
+    public boolean maintain(long storeId) {
+        Long userId = UserContext.current().getCallerUserId();
+        User user = _userDao.findById(userId);
+        Account account = UserContext.current().getCaller();
+        StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId);
+        try {
+            StoragePool storagePool = (StoragePool) this.dataStoreMgr
+                    .getDataStore(storeId, DataStoreRole.Primary);
+            List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
+                    pool.getClusterId(), Status.Up);
+            if (hosts == null || hosts.size() == 0) {
+                pool.setStatus(StoragePoolStatus.Maintenance);
+                primaryDataStoreDao.update(pool.getId(), pool);
+                return true;
+            } else {
+                // set the pool state to prepare for maintenance
+                pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
+                primaryDataStoreDao.update(pool.getId(), pool);
+            }
+            // remove heartbeat
+            for (HostVO host : hosts) {
+                ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
+                        false, storagePool);
+                final Answer answer = agentMgr.easySend(host.getId(), cmd);
+                if (answer == null || !answer.getResult()) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("ModifyStoragePool false failed due to "
+                                + ((answer == null) ? "answer null" : answer
+                                        .getDetails()));
+                    }
+                } else {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("ModifyStoragePool false secceeded");
+                    }
+                }
+            }
+            // check to see if other ps exist
+            // if they do, then we can migrate over the system vms to them
+            // if they dont, then just stop all vms on this one
+            List<StoragePoolVO> upPools = primaryDataStoreDao
+                    .listByStatusInZone(pool.getDataCenterId(),
+                            DataStoreStatus.Up);
+            boolean restart = true;
+            if (upPools == null || upPools.size() == 0) {
+                restart = false;
+            }
+
+            // 2. Get a list of all the ROOT volumes within this storage pool
+            List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
+                    .getId());
+
+            // 3. Enqueue to the work queue
+            for (VolumeVO volume : allVolumes) {
+                VMInstanceVO vmInstance = vmDao
+                        .findById(volume.getInstanceId());
+
+                if (vmInstance == null) {
+                    continue;
+                }
+
+                // enqueue sp work
+                if (vmInstance.getState().equals(State.Running)
+                        || vmInstance.getState().equals(State.Starting)
+                        || vmInstance.getState().equals(State.Stopping)) {
+
+                    try {
+                        StoragePoolWorkVO work = new StoragePoolWorkVO(
+                                vmInstance.getId(), pool.getId(), false, false,
+                                server.getId());
+                        _storagePoolWorkDao.persist(work);
+                    } catch (Exception e) {
+                        if (s_logger.isDebugEnabled()) {
+                            s_logger.debug("Work record already exists, re-using by re-setting values");
+                        }
+                        StoragePoolWorkVO work = _storagePoolWorkDao
+                                .findByPoolIdAndVmId(pool.getId(),
+                                        vmInstance.getId());
+                        work.setStartedAfterMaintenance(false);
+                        work.setStoppedForMaintenance(false);
+                        work.setManagementServerId(server.getId());
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+            }
+
+            // 4. Process the queue
+            List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
+                    .listPendingWorkForPrepareForMaintenanceByPoolId(pool
+                            .getId());
+
+            for (StoragePoolWorkVO work : pendingWork) {
+                // shut down the running vms
+                VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
+
+                if (vmInstance == null) {
+                    continue;
+                }
+
+                // if the instance is of type consoleproxy, call the console
+                // proxy
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.ConsoleProxy)) {
+                    // call the consoleproxymanager
+                    ConsoleProxyVO consoleProxy = _consoleProxyDao
+                            .findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
+                        String errorMsg = "There was an error stopping the console proxy id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+
+                        if (this.vmMgr.advanceStart(consoleProxy, null, user,
+                                account) == null) {
+                            String errorMsg = "There was an error starting the console proxy id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+
+                // if the instance is of type uservm, call the user vm manager
+                if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
+                    UserVmVO userVm = userVmDao.findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(userVm, true, user, account)) {
+                        String errorMsg = "There was an error stopping the user vm id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type secondary storage vm, call the
+                // secondary storage vm manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.SecondaryStorageVm)) {
+                    SecondaryStorageVmVO secStrgVm = _secStrgDao
+                            .findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
+                        String errorMsg = "There was an error stopping the ssvm id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+                        if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) {
+                            String errorMsg = "There was an error starting the ssvm id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+
+                // if the instance is of type domain router vm, call the network
+                // manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.DomainRouter)) {
+                    DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
+                    if (!vmMgr.advanceStop(domR, true, user, account)) {
+                        String errorMsg = "There was an error stopping the domain router id: "
+                                + vmInstance.getId()
+                                + " ,cannot enable primary storage maintenance";
+                        s_logger.warn(errorMsg);
+                        throw new CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+                        if (vmMgr.advanceStart(domR, null, user, account) == null) {
+                            String errorMsg = "There was an error starting the domain router id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+            }
+
+            // 5. Update the status
+            pool.setStatus(StoragePoolStatus.Maintenance);
+            this.primaryDataStoreDao.update(pool.getId(), pool);
+
+            return true;
+        } catch (Exception e) {
+            s_logger.error(
+                    "Exception in enabling primary storage maintenance:", e);
+            setPoolStateToError(pool);
+            throw new CloudRuntimeException(e.getMessage());
+        }
+    }
+
+    /**
+     * Flags a primary storage pool as ErrorInMaintenance and persists the new
+     * status, so a failed maintenance transition is visible to operators.
+     */
+    private void setPoolStateToError(StoragePoolVO pool) {
+        pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
+        primaryDataStoreDao.update(pool.getId(), pool);
+    }
+
+    /**
+     * Takes a primary storage pool out of maintenance: marks the pool Up
+     * again, re-establishes the storage heartbeat on every Up host in the
+     * cluster, then restarts the console proxies, secondary storage VMs,
+     * domain routers and user VMs that were stopped when maintenance began
+     * (driven by the pending storage_pool_work queue).
+     *
+     * @param storageId id of the storage pool leaving maintenance
+     * @return true when the pool was returned to service
+     */
+    @Override
+    public boolean cancelMaintain(long storageId) {
+        // Change the storage state back to up
+        Long userId = UserContext.current().getCallerUserId();
+        User user = _userDao.findById(userId);
+        Account account = UserContext.current().getCaller();
+        StoragePoolVO poolVO = this.primaryDataStoreDao
+                .findById(storageId);
+        StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(
+                storageId, DataStoreRole.Primary);
+        poolVO.setStatus(StoragePoolStatus.Up);
+        primaryDataStoreDao.update(storageId, poolVO);
+
+        List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
+                pool.getClusterId(), Status.Up);
+        if (hosts == null || hosts.size() == 0) {
+            // no host up in the cluster; nothing to reconnect or restart
+            return true;
+        }
+        // add heartbeat: ask each Up host to re-attach the pool
+        for (HostVO host : hosts) {
+            ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
+                    true, pool);
+            final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
+            if (answer == null || !answer.getResult()) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("ModifyStoragePool add failed due to "
+                            + ((answer == null) ? "answer null" : answer
+                                    .getDetails()));
+                }
+            } else {
+                if (s_logger.isDebugEnabled()) {
+                    // fixed typo: was "secceeded"
+                    s_logger.debug("ModifyStoragePool add succeeded");
+                }
+            }
+        }
+
+        // 2. Get a list of pending work for this queue
+        List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
+                .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
+
+        // 3. work through the queue
+        for (StoragePoolWorkVO work : pendingWork) {
+            try {
+                VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
+
+                if (vmInstance == null) {
+                    continue;
+                }
+
+                // if the instance is of type consoleproxy, call the console
+                // proxy
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.ConsoleProxy)) {
+
+                    ConsoleProxyVO consoleProxy = _consoleProxyDao
+                            .findById(vmInstance.getId());
+                    if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) {
+                        String msg = "There was an error starting the console proxy id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type ssvm, call the ssvm manager
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.SecondaryStorageVm)) {
+                    SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
+                            .getId());
+                    if (vmMgr.advanceStart(ssVm, null, user, account) == null) {
+                        String msg = "There was an error starting the ssvm id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type domain router, restart it
+                // (comment previously said "ssvm" -- copy/paste error)
+                if (vmInstance.getType().equals(
+                        VirtualMachine.Type.DomainRouter)) {
+                    DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
+                    if (vmMgr.advanceStart(domR, null, user, account) == null) {
+                        String msg = "There was an error starting the domR id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+
+                // if the instance is of type user vm, call the user vm manager
+                if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
+                    UserVmVO userVm = userVmDao.findById(vmInstance.getId());
+
+                    if (vmMgr.advanceStart(userVm, null, user, account) == null) {
+
+                        String msg = "There was an error starting the user vm id: "
+                                + vmInstance.getId()
+                                + " on storage pool, cannot complete primary storage maintenance";
+                        s_logger.warn(msg);
+                        throw new ExecutionException(msg);
+                    } else {
+                        // update work queue
+                        work.setStartedAfterMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+                }
+            } catch (Exception e) {
+                s_logger.debug("Failed start vm", e);
+                throw new CloudRuntimeException(e.toString());
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Deletes a primary storage pool. When no host is attached, the pool row
+     * and its capacity stats are removed directly. Otherwise each attached
+     * host is asked to tear down the pool (DeleteStoragePoolCommand); one
+     * successful host is enough, after which the host refs, pool row, stats
+     * and op_host_capacity entries are cleaned up in one transaction.
+     *
+     * Fixes over the previous version: cleanup no longer happens inside a
+     * finally block (a return in finally discards pending exceptions and
+     * overrides the try's return), and the empty-host base case no longer
+     * deletes the pool a second time on the way out.
+     *
+     * @param storeId id of the pool to delete
+     * @return true when the pool was removed, false otherwise
+     */
+    @DB
+    @Override
+    public boolean deleteDataStore(long storeId) {
+        // for the given pool id, find all records in the storage_pool_host_ref
+        List<StoragePoolHostVO> hostPoolRecords = this._storagePoolHostDao
+                .listByPoolId(storeId);
+        StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId);
+        StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary);
+        Transaction txn = Transaction.currentTxn();
+
+        // base case: no host ever attached, just remove the database records
+        if (hostPoolRecords.size() == 0) {
+            txn.start();
+            poolVO.setUuid(null);
+            this.primaryDataStoreDao.update(poolVO.getId(), poolVO);
+            primaryDataStoreDao.remove(poolVO.getId());
+            deletePoolStats(poolVO.getId());
+            txn.commit();
+            return true;
+        }
+
+        // Remove the SR associated with the Xenserver; a single successful
+        // host suffices since the underlying storage is shared
+        boolean deleteFlag = false;
+        for (StoragePoolHostVO host : hostPoolRecords) {
+            DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(
+                    pool);
+            final Answer answer = agentMgr.easySend(host.getHostId(),
+                    deleteCmd);
+            if (answer != null && answer.getResult()) {
+                deleteFlag = true;
+                break;
+            }
+        }
+
+        if (!deleteFlag) {
+            // alert that the storage cleanup is required
+            s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId());
+            _alertMgr
+                    .sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE,
+                            poolVO.getDataCenterId(), poolVO.getPodId(),
+                            "Unable to delete storage pool id= " + poolVO.getId(),
+                            "Delete storage pool command failed.  Please check logs.");
+            return false;
+        }
+
+        // now delete the storage_pool_host_ref and storage_pool records
+        txn.start();
+        for (StoragePoolHostVO host : hostPoolRecords) {
+            _storagePoolHostDao.deleteStoragePoolHostDetails(
+                    host.getHostId(), host.getPoolId());
+        }
+        poolVO.setUuid(null);
+        this.primaryDataStoreDao.update(poolVO.getId(), poolVO);
+        primaryDataStoreDao.remove(poolVO.getId());
+        deletePoolStats(poolVO.getId());
+        // Delete op_host_capacity entries
+        this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED,
+                null, null, null, poolVO.getId());
+        txn.commit();
+
+        s_logger.debug("Storage pool id=" + poolVO.getId()
+                + " is removed successfully");
+        return true;
+    }
+
+    /**
+     * Removes the capacity rows (storage and allocated storage) recorded for
+     * the given pool, inside a single transaction.
+     *
+     * NOTE(review): findByHostIdType is called with a pool id here -- this
+     * mirrors how op_host_capacity keys pool capacity on host_id; confirm
+     * against the DAO.
+     */
+    @DB
+    private boolean deletePoolStats(Long poolId) {
+        CapacityVO storageCapacity = _capacityDao.findByHostIdType(poolId,
+                CapacityVO.CAPACITY_TYPE_STORAGE);
+        CapacityVO allocatedCapacity = _capacityDao.findByHostIdType(poolId,
+                CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
+
+        Transaction txn = Transaction.currentTxn();
+        txn.start();
+        if (storageCapacity != null) {
+            _capacityDao.remove(storageCapacity.getId());
+        }
+        if (allocatedCapacity != null) {
+            _capacityDao.remove(allocatedCapacity.getId());
+        }
+        txn.commit();
+
+        return true;
+    }
+
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId());
+        if (poolHost == null) {
+            poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath());
+            _storagePoolHostDao.persist(poolHost);
+        }
+       
+        StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
+        pool.setScope(scope.getScopeType());
+        pool.setAvailableBytes(existingInfo.getAvailableBytes());
+        pool.setCapacityBytes(existingInfo.getCapacityBytes());
+        pool.setStatus(StoragePoolStatus.Up);
+        this.primaryDataStoreDao.update(pool.getId(), pool);
+        this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes());
+        
+        return true;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java
index ffe7efd..5e8727a 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java
@@ -26,22 +26,22 @@ import javax.inject.Inject;
 import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
 import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
-import org.apache.cloudstack.storage.datastore.DataStoreStatus;
-import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.endpoint.EndPointSelector;
-import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper;
 import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
 
+import com.cloud.agent.api.StoragePoolInfo;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.StoragePoolStatus;
 
 public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     @Inject
@@ -58,9 +58,9 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
     }
     
     @Override
-    public DataStore initialize(Map<String, String> dsInfos) {
+    public DataStore initialize(Map<String, Object> dsInfos) {
         
-        PrimaryDataStoreVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); 
+        StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); 
         return providerMgr.getPrimaryDataStore(storeVO.getId());
     }
 
@@ -83,11 +83,11 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
     
     @Override
     public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
-        PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStore.getId());
+        StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStore.getId());
         dataStoreVO.setDataCenterId(scope.getZoneId());
         dataStoreVO.setPodId(scope.getPodId());
         dataStoreVO.setClusterId(scope.getScopeId());
-        dataStoreVO.setStatus(DataStoreStatus.Attaching);
+        dataStoreVO.setStatus(StoragePoolStatus.Attaching);
         dataStoreVO.setScope(scope.getScopeType());
         dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);
         
@@ -95,7 +95,7 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
         attachCluster(dataStore);
         
         dataStoreVO = dataStoreDao.findById(dataStore.getId());
-        dataStoreVO.setStatus(DataStoreStatus.Up);
+        dataStoreVO.setStatus(StoragePoolStatus.Up);
         dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);
         
         return true;
@@ -114,19 +114,19 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
     }
 
     @Override
-    public boolean maintain() {
+    public boolean maintain(long storeId) {
         // TODO Auto-generated method stub
         return false;
     }
 
     @Override
-    public boolean cancelMaintain() {
+    public boolean cancelMaintain(long storeId) {
         // TODO Auto-generated method stub
         return false;
     }
 
     @Override
-    public boolean deleteDataStore() {
+    public boolean deleteDataStore(long storeId) {
         // TODO Auto-generated method stub
         return false;
     }
@@ -139,4 +139,11 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
         return false;
     }
 
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope,
+            StoragePoolInfo existingInfo) {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java
index 1a24d87..fdbe4b4 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java
@@ -21,21 +21,21 @@ package org.apache.cloudstack.storage.datastore.manager;
 import java.util.HashMap;
 import java.util.Map;
 
+import javax.annotation.PostConstruct;
 import javax.inject.Inject;
 
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
 import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
 import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
-import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider;
-import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver;
 import org.springframework.stereotype.Component;
 
-import com.cloud.utils.component.ComponentContext;
-
 @Component
 public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProviderManager {
     @Inject
@@ -44,16 +44,18 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt
     DataStoreProviderManager providerManager;
     @Inject
     PrimaryDataStoreDao dataStoreDao;
-    Map<String, PrimaryDataStoreDriver> driverMaps = new HashMap<String, PrimaryDataStoreDriver>();
+    Map<String, PrimaryDataStoreDriver> driverMaps;
 
+    @PostConstruct
+    public void config() {
+        driverMaps = new HashMap<String, PrimaryDataStoreDriver>();
+    }
+    
     @Override
     public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
-        PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStoreId);
+        StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
         long providerId = dataStoreVO.getStorageProviderId();
         DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId);
-        /*DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, 
-                driverMaps.get(provider.getUuid()),
-                provider);*/
         DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider);
         return dataStore;
     }
@@ -66,4 +68,16 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt
         driverMaps.put(uuid, driver);
         return true;
     }
+
+    @Override
+    public PrimaryDataStore getPrimaryDataStore(String uuid) {
+        StoragePoolVO dataStoreVO = dataStoreDao.findByUuid(uuid);
+        return getPrimaryDataStore(dataStoreVO.getId());
+    }
+
+    @Override
+    public boolean registerHostListener(String uuid, HypervisorHostListener listener) {
+        // TODO Auto-generated method stub
+        return false;
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls
index 9386454..f159039 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls	
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls	
@@ -1,21 +1,3 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements. See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership. The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License. You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied. See the License for the
-  specific language governing permissions and limitations
-  under the License.
--->
 <class-diagram version="1.0.11" icons="true" always-add-relationships="false" generalizations="true" realizations="true" 
   associations="true" dependencies="false" nesting-relationships="true">  
   <interface id="1" corner="BOTTOM_RIGHT" language="java" 
@@ -36,18 +18,18 @@
       <operations public="true" package="true" protected="true" private="true"/>    
     </display>  
   </interface>  
-  <interface id="3" corner="BOTTOM_RIGHT" language="java" name="org.apache.cloudstack.storage.snapshot.SnapshotInfo" 
-    project="cloud-engine-storage" 
-    file="/cloud-engine-storage/src/org/apache/cloudstack/storage/snapshot/SnapshotInfo.java" binary="false">    
+  <interface id="3" corner="BOTTOM_RIGHT" language="java" 
+    name="org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo" project="cloud-engine-api" 
+    file="/cloud-engine-api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java" binary="false">    
     <position height="-1" width="-1" x="361" y="282"/>    
     <display autosize="true" stereotype="true" package="true" initial-value="false" signature="true" visibility="true">      
       <attributes public="true" package="true" protected="true" private="true"/>      
       <operations public="true" package="true" protected="true" private="true"/>    
     </display>  
   </interface>  
-  <interface id="4" corner="BOTTOM_RIGHT" language="java" name="org.apache.cloudstack.storage.image.TemplateInfo" 
-    project="cloud-engine-storage" 
-    file="/cloud-engine-storage/src/org/apache/cloudstack/storage/image/TemplateInfo.java" binary="false">    
+  <interface id="4" corner="BOTTOM_RIGHT" language="java" 
+    name="org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo" project="cloud-engine-api" 
+    file="/cloud-engine-api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java" binary="false">    
     <position height="-1" width="-1" x="573" y="292"/>    
     <display autosize="true" stereotype="true" package="true" initial-value="false" signature="true" visibility="true">      
       <attributes public="true" package="true" protected="true" private="true"/>      
@@ -72,4 +54,4 @@
     <operations public="true" package="true" protected="true" private="true"/>  
   </classifier-display>  
   <association-display labels="true" multiplicity="true"/>
-</class-diagram>
+</class-diagram>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
new file mode 100644
index 0000000..702ab23
--- /dev/null
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.provider;
+
+import java.util.Map;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
+import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl;
+import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCyclImpl;
+import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver;
+import org.springframework.stereotype.Component;
+
+import com.cloud.utils.component.ComponentContext;
+
+/**
+ * Data store provider wrapping the legacy ("ancient") primary storage code
+ * path. Wires the matching lifecycle and driver implementations at
+ * configure() time and registers the driver with the provider manager.
+ *
+ * NOTE(review): "lifecyle" (field) and "LifeCycl" (lifecycle class name) are
+ * misspelled upstream; kept as-is to match the rest of the patch.
+ */
+@Component
+public class AncientPrimaryDataStoreProviderImpl implements
+        PrimaryDataStoreProvider {
+
+    private final String providerName = "ancient primary data store provider";
+    protected PrimaryDataStoreDriver driver;
+    @Inject
+    PrimaryDataStoreProviderManager storeMgr;
+    protected DataStoreLifeCycle lifecyle;
+    protected String uuid;
+    protected long id;
+    // Display name of this provider.
+    @Override
+    public String getName() {
+        return providerName;
+    }
+
+    // Lifecycle object created in configure(); null until then.
+    @Override
+    public DataStoreLifeCycle getLifeCycle() {
+        return this.lifecyle;
+    }
+
+    /**
+     * Injects the lifecycle/driver beans, captures the provider's uuid and id
+     * from params, and registers the driver under that uuid.
+     * Expects params to contain "uuid" (String) and "id" (Long).
+     */
+    @Override
+    public boolean configure(Map<String, Object> params) {
+        lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCyclImpl.class);
+        driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class);
+        uuid = (String)params.get("uuid");
+        id = (Long)params.get("id");
+        storeMgr.registerDriver(uuid, this.driver);
+        return true;
+    }
+
+    @Override
+    public String getUuid() {
+        return this.uuid;
+    }
+
+    @Override
+    public long getId() {
+        return this.id;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java
new file mode 100644
index 0000000..f2cb1c4
--- /dev/null
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.provider;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.alert.AlertManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class DefaultHostListener implements HypervisorHostListener {
+    private static final Logger s_logger = Logger
+            .getLogger(DefaultHostListener.class);
+    @Inject AgentManager agentMgr;
+    @Inject DataStoreManager dataStoreMgr;
+    @Inject AlertManager alertMgr;
+    @Inject StoragePoolHostDao storagePoolHostDao;
+    @Inject PrimaryDataStoreDao primaryStoreDao;
+    @Override
+    public boolean hostConnect(long hostId, long poolId) {
+        StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
+        ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool);
+        final Answer answer = agentMgr.easySend(hostId, cmd);
+
+        if (answer == null) {
+            throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId());
+        }
+
+        if (!answer.getResult()) {
+            String msg = "Add host failed due to ModifyStoragePoolCommand failed" + answer.getDetails();
+            alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
+            throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + pool.getId());
+        }
+
+        assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId;
+        ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer;
+
+        StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId);
+        if (poolHost == null) {
+            poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
+            storagePoolHostDao.persist(poolHost);
+        } else {
+            poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
+        }
+        
+        StoragePoolVO poolVO = this.primaryStoreDao.findById(poolId);
+        poolVO.setAvailableBytes(mspAnswer.getPoolInfo().getAvailableBytes());
+        poolVO.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes());
+        primaryStoreDao.update(pool.getId(), poolVO);
+
+        s_logger.info("Connection established between " + pool + " host + " + hostId);
+        return true;
+    }
+
+    @Override
+    public boolean hostDisconnected(long hostId, long poolId) {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java
index 540ea63..85a5d02 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java
@@ -21,6 +21,7 @@ import java.util.Map;
 import javax.inject.Inject;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
 import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl;
 import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl;
@@ -35,6 +36,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
     protected PrimaryDataStoreDriver driver;
     @Inject
     PrimaryDataStoreProviderManager storeMgr;
+
     protected DataStoreLifeCycle lifecyle;
     protected String uuid;
     protected long id;
@@ -52,9 +54,11 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
     public boolean configure(Map<String, Object> params) {
         lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class);
         driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class);
+        HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class);
         uuid = (String)params.get("uuid");
         id = (Long)params.get("id");
         storeMgr.registerDriver(uuid, this.driver);
+        storeMgr.registerHostListener(uuid, listener);
         return true;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java
index 7679bb3..99b34cb 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java
@@ -18,9 +18,9 @@
  */
 package org.apache.cloudstack.storage.volume;
 
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
-import org.apache.cloudstack.storage.image.TemplateInfo;
 import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult;
 
 public interface TemplateInstallStrategy {

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java
index 80e098d..5f1735c 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java
@@ -20,24 +20,16 @@ package org.apache.cloudstack.storage.volume;
 
 import javax.inject.Inject;
 
-import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
-import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
-import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
+import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
 import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
-import org.apache.cloudstack.storage.db.ObjectInDataStoreVO;
-import org.apache.cloudstack.storage.image.ImageDataFactory;
-import org.apache.cloudstack.storage.image.TemplateInfo;
 import org.apache.cloudstack.storage.motion.DataMotionService;
 import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.utils.fsm.NoTransitionException;
-
 @Component
 public class TemplateInstallStrategyImpl implements TemplateInstallStrategy {
     private static final Logger s_logger = Logger
@@ -50,7 +42,7 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy {
     ImageDataFactory imageFactory;
     protected long waitingTime = 1800; // half an hour
     protected long waitingRetries = 10;
-
+/*
     protected TemplateInfo waitingForTemplateDownload(TemplateInfo template,
             PrimaryDataStore dataStore) {
         long retries = this.waitingRetries;
@@ -106,8 +98,8 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy {
         boolean freshNewTemplate = false;
         if (obj == null) {
             try {
-                /*templateOnPrimaryStoreObj = objectInDataStoreMgr.create(
-                        template, store);*/
+                templateOnPrimaryStoreObj = objectInDataStoreMgr.create(
+                        template, store);
                 freshNewTemplate = true;
             } catch (Throwable e) {
                 obj = objectInDataStoreMgr.findObject(template.getId(),
@@ -264,13 +256,10 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy {
             res.setResult(result.getResult());
             context.getParentCallback().complete(res);
         }
-        ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject(
-                templateOnPrimaryStoreObj.getId(), templateOnPrimaryStoreObj
-                        .getType(), templateOnPrimaryStoreObj.getDataStore()
-                        .getId(), templateOnPrimaryStoreObj.getDataStore()
-                        .getRole());
+        DataObjectInStore obj = objectInDataStoreMgr.findObject(
+                templateOnPrimaryStoreObj, templateOnPrimaryStoreObj.getDataStore());
+
 
-        obj.setInstallPath(result.getPath());
         CreateBaseImageResult res = new CreateBaseImageResult(
                 templateOnPrimaryStoreObj);
         try {
@@ -289,6 +278,12 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy {
         }
         context.getParentCallback().complete(res);
         return null;
+    }*/
+    @Override
+    public Void installAsync(TemplateInfo template, PrimaryDataStore store,
+            AsyncCompletionCallback<CreateBaseImageResult> callback) {
+        // TODO Auto-generated method stub
+        return null;
     }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
index 64af097..e0ecd16 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
@@ -20,21 +20,23 @@ package org.apache.cloudstack.storage.volume;
 
 import javax.inject.Inject;
 
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
-import org.apache.cloudstack.storage.datastore.DataStoreManager;
 import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
-import org.apache.cloudstack.storage.datastore.VolumeDataFactory;
-import org.apache.cloudstack.storage.db.ObjectInDataStoreVO;
-import org.apache.cloudstack.storage.volume.db.VolumeDao2;
-import org.apache.cloudstack.storage.volume.db.VolumeVO;
 import org.springframework.stereotype.Component;
 
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+
 @Component
 public class VolumeDataFactoryImpl implements VolumeDataFactory {
     @Inject
-    VolumeDao2 volumeDao;
+    VolumeDao volumeDao;
     @Inject
     ObjectInDataStoreManager objMap;
     @Inject
@@ -42,12 +44,30 @@ public class VolumeDataFactoryImpl implements VolumeDataFactory {
     @Override
     public VolumeInfo getVolume(long volumeId, DataStore store) {
         VolumeVO volumeVO = volumeDao.findById(volumeId);
-        ObjectInDataStoreVO obj = objMap.findObject(volumeId, DataObjectType.VOLUME, store.getId(), store.getRole());
-        if (obj == null) {
-            VolumeObject vol = VolumeObject.getVolumeObject(null, volumeVO);
-            return vol;
-        }
+       
         VolumeObject vol = VolumeObject.getVolumeObject(store, volumeVO);
+     
+        return vol;
+    }
+    
+    @Override
+    public VolumeInfo getVolume(long volumeId) {
+        VolumeVO volumeVO = volumeDao.findById(volumeId);
+        VolumeObject vol = null;
+        if (volumeVO.getPoolId() == null) {
+            DataStore store = objMap.findStore(volumeVO.getUuid(), DataObjectType.VOLUME, DataStoreRole.Image);
+            vol = VolumeObject.getVolumeObject(store, volumeVO);
+        } else {
+            DataStore store = this.storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary);
+            vol = VolumeObject.getVolumeObject(store, volumeVO);
+        }
+        return vol;
+    }
+
+    @Override
+    public VolumeInfo getVolume(DataObject volume, DataStore store) {
+        VolumeInfo vol = (VolumeObject)getVolume(volume.getId(), store);
+        vol.addPayload(((VolumeInfo)volume).getpayload());
         return vol;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java
index 14d7417..f8d5043 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java
@@ -22,20 +22,17 @@ import java.lang.reflect.Method;
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ExecutionException;
 
 import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity;
 import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
 import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat;
 import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType;
-import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStoreEntityImpl;
-import org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult;
-
-import com.cloud.utils.exception.CloudRuntimeException;
 
 public class VolumeEntityImpl implements VolumeEntity {
     private VolumeInfo volumeInfo;
@@ -167,7 +164,7 @@ public class VolumeEntityImpl implements VolumeEntity {
 
     @Override
     public void destroy() {
-        AsyncCallFuture<VolumeApiResult> future = vs.deleteVolumeAsync(volumeInfo);
+        /*AsyncCallFuture<VolumeApiResult> future = vs.deleteVolumeAsync(volumeInfo);
         try {
             result = future.get();
             if (!result.isSuccess()) {
@@ -177,7 +174,7 @@ public class VolumeEntityImpl implements VolumeEntity {
            throw new CloudRuntimeException("wait to delete volume info failed", e);
         } catch (ExecutionException e) {
             throw new CloudRuntimeException("wait to delete volume failed", e);
-        }
+        }*/
     }
 
 	@Override

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java
deleted file mode 100644
index f27753d..0000000
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cloudstack.storage.volume;
-
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeProfile;
-import org.apache.cloudstack.storage.volume.db.VolumeVO;
-
-import com.cloud.storage.Volume;
-import com.cloud.storage.Volume.Event;
-import com.cloud.storage.Volume.State;
-import com.cloud.utils.fsm.NoTransitionException;
-import com.cloud.utils.fsm.StateMachine2;
-
-public interface VolumeManager {
-    VolumeVO allocateDuplicateVolume(VolumeVO oldVol);
-
-    VolumeVO processEvent(Volume vol, Volume.Event event) throws NoTransitionException;
-
-    VolumeProfile getProfile(long volumeId);
-
-    VolumeVO getVolume(long volumeId);
-
-    VolumeVO updateVolume(VolumeVO volume);
-
-    /**
-     * @return
-     */
-    StateMachine2<State, Event, VolumeVO> getStateMachine();
-}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/c06e72de/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java
deleted file mode 100644
index bcff312..0000000
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cloudstack.storage.volume;
-
-import javax.inject.Inject;
-
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeProfile;
-import org.apache.cloudstack.storage.volume.db.VolumeDao2;
-import org.apache.cloudstack.storage.volume.db.VolumeVO;
-import org.springframework.stereotype.Component;
-
-import com.cloud.storage.Volume;
-import com.cloud.storage.Volume.Event;
-import com.cloud.storage.Volume.State;
-import com.cloud.utils.fsm.NoTransitionException;
-import com.cloud.utils.fsm.StateMachine2;
-
-@Component
-public class VolumeManagerImpl implements VolumeManager {
-    @Inject
-    protected VolumeDao2 _volumeDao;
-    private final StateMachine2<State, Event, VolumeVO> s_fsm = new StateMachine2<State, Event, VolumeVO>();
-    public VolumeManagerImpl() {
-        initStateMachine();
-    }
-
-    @Override
-    public VolumeVO allocateDuplicateVolume(VolumeVO oldVol) {
-        /*
-        VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(), oldVol.getSize());
-        newVol.setTemplateId(oldVol.getTemplateId());
-        newVol.setDeviceId(oldVol.getDeviceId());
-        newVol.setInstanceId(oldVol.getInstanceId());
-        newVol.setRecreatable(oldVol.isRecreatable());
-        newVol.setReservationId(oldVol.getReservationId());
-         */
-        return null;
-        // return _volumeDao.persist(newVol);
-    }
-
-    private void initStateMachine() {
-        s_fsm.addTransition(Volume.State.Allocated, Event.CreateRequested, Volume.State.Creating);
-        s_fsm.addTransition(Volume.State.Allocated, Event.DestroyRequested, Volume.State.Destroying);
-        s_fsm.addTransition(Volume.State.Creating, Event.OperationRetry, Volume.State.Creating);
-        s_fsm.addTransition(Volume.State.Creating, Event.OperationFailed, Volume.State.Allocated);
-        s_fsm.addTransition(Volume.State.Creating, Event.OperationSucceeded, Volume.State.Ready);
-        s_fsm.addTransition(Volume.State.Creating, Event.DestroyRequested, Volume.State.Destroying);
-        s_fsm.addTransition(Volume.State.Creating, Event.CreateRequested, Volume.State.Creating);            
-        s_fsm.addTransition(Volume.State.Allocated, Event.UploadRequested, Volume.State.UploadOp);
-        s_fsm.addTransition(Volume.State.UploadOp, Event.CopyRequested, Volume.State.Creating);// CopyRequested for volume from sec to primary storage            
-        s_fsm.addTransition(Volume.State.Creating, Event.CopySucceeded, Volume.State.Ready);
-        s_fsm.addTransition(Volume.State.Creating, Event.CopyFailed, Volume.State.UploadOp);// Copying volume from sec to primary failed.  
-        s_fsm.addTransition(Volume.State.UploadOp, Event.DestroyRequested, Volume.State.Destroying);
-        s_fsm.addTransition(Volume.State.Ready, Event.DestroyRequested, Volume.State.Destroying);
-        s_fsm.addTransition(Volume.State.Destroy, Event.ExpungingRequested, Volume.State.Expunging);
-        s_fsm.addTransition(Volume.State.Ready, Event.SnapshotRequested, Volume.State.Snapshotting);
-        s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationSucceeded, Volume.State.Ready);
-        s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationFailed, Volume.State.Ready);
-        s_fsm.addTransition(Volume.State.Ready, Event.MigrationRequested, Volume.State.Migrating);
-        s_fsm.addTransition(Volume.State.Migrating, Event.OperationSucceeded, Volume.State.Ready);
-        s_fsm.addTransition(Volume.State.Migrating, Event.OperationFailed, Volume.State.Ready);
-        s_fsm.addTransition(Volume.State.Destroy, Event.OperationSucceeded, Volume.State.Destroy);
-        s_fsm.addTransition(Volume.State.Destroying, Event.OperationSucceeded, Volume.State.Destroy);
-        s_fsm.addTransition(Volume.State.Destroying, Event.OperationFailed, Volume.State.Destroying);
-        s_fsm.addTransition(Volume.State.Destroying, Event.DestroyRequested, Volume.State.Destroying);
-    }
-
-    @Override
-    public StateMachine2<State, Event, VolumeVO> getStateMachine() {
-        return s_fsm;
-    }
-
-    @Override
-    public VolumeVO processEvent(Volume vol, Volume.Event event) throws NoTransitionException {
-        // _volStateMachine.transitTo(vol, event, null, _volumeDao);
-        return _volumeDao.findById(vol.getId());
-    }
-
-    @Override
-    public VolumeProfile getProfile(long volumeId) {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public VolumeVO getVolume(long volumeId) {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public VolumeVO updateVolume(VolumeVO volume) {
-        // TODO Auto-generated method stub
-        return null;
-    }
-}


Mime
View raw message