incubator-cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bfede...@apache.org
Subject [42/50] [abbrv] Backs NFS-based secondary storage with an S3-compatible object store. Periodically, a reaper thread synchronizes templates and ISOs stored on a NFS secondary storage mount with a configured S3 object store. It also pushes snapshots to the S3 object store.
Date Wed, 19 Dec 2012 22:04:28 GMT
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/server/src/com/cloud/storage/s3/S3ManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/s3/S3ManagerImpl.java b/server/src/com/cloud/storage/s3/S3ManagerImpl.java
new file mode 100644
index 0000000..6b07254
--- /dev/null
+++ b/server/src/com/cloud/storage/s3/S3ManagerImpl.java
@@ -0,0 +1,669 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.storage.s3;
+
+import static com.cloud.storage.S3VO.ID_COLUMN_NAME;
+import static com.cloud.utils.DateUtil.now;
+import static com.cloud.utils.S3Utils.canConnect;
+import static com.cloud.utils.S3Utils.canReadWriteBucket;
+import static com.cloud.utils.S3Utils.checkBucketName;
+import static com.cloud.utils.S3Utils.checkClientOptions;
+import static com.cloud.utils.S3Utils.doesBucketExist;
+import static com.cloud.utils.StringUtils.join;
+import static com.cloud.utils.db.GlobalLock.executeWithNoWaitLock;
+import static com.cloud.utils.db.SearchCriteria.Op.EQ;
+import static java.lang.Boolean.TRUE;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.shuffle;
+import static java.util.Collections.singletonList;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+import javax.ejb.Local;
+import javax.naming.ConfigurationException;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteTemplateFromS3Command;
+import com.cloud.agent.api.DownloadTemplateFromS3ToSecondaryStorageCommand;
+import com.cloud.agent.api.UploadTemplateToS3FromSecondaryStorageCommand;
+import com.cloud.agent.api.to.S3TO;
+import com.cloud.api.commands.AddS3Cmd;
+import com.cloud.api.commands.ListS3sCmd;
+import com.cloud.configuration.Config;
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.exception.DiscoveryException;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.storage.S3;
+import com.cloud.storage.S3VO;
+import com.cloud.storage.VMTemplateHostVO;
+import com.cloud.storage.VMTemplateS3VO;
+import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VMTemplateZoneVO;
+import com.cloud.storage.dao.S3Dao;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplateHostDao;
+import com.cloud.storage.dao.VMTemplateS3Dao;
+import com.cloud.storage.dao.VMTemplateZoneDao;
+import com.cloud.storage.secondary.SecondaryStorageVmManager;
+import com.cloud.utils.S3Utils.ClientOptions;
+import com.cloud.utils.component.Inject;
+import com.cloud.utils.db.Filter;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+/**
+ * Manages the (singleton) S3 object store configuration and the movement of
+ * templates between NFS secondary storage and the S3 store.
+ */
+@Local(value = { S3Manager.class })
+public class S3ManagerImpl implements S3Manager {
+
+    private static final Logger LOGGER = Logger.getLogger(S3ManagerImpl.class);
+
+    // Component name assigned via configure(String, Map).
+    private String name;
+
+    // Dispatches commands to the secondary storage VM (SSVM).
+    @Inject
+    private AgentManager agentManager;
+
+    @Inject
+    private S3Dao s3Dao;
+
+    @Inject
+    private VMTemplateZoneDao vmTemplateZoneDao;
+
+    // Tracks which templates have been pushed to which S3 instance.
+    @Inject
+    private VMTemplateS3Dao vmTemplateS3Dao;
+
+    @Inject
+    private VMTemplateHostDao vmTemplateHostDao;
+
+    @Inject
+    private VMTemplateDao vmTemplateDao;
+
+    @Inject
+    private ConfigurationDao configurationDao;
+
+    @Inject
+    private DataCenterDao dataCenterDao;
+
+    @Inject
+    private HostDao hostDao;
+
+    @Inject
+    private SecondaryStorageVmManager secondaryStorageVMManager;
+
+    // Instantiation is reserved for the component framework.
+    protected S3ManagerImpl() {
+        super();
+    }
+
+    /**
+     * Verifies that a connection can be established to S3 using the supplied
+     * client options.
+     *
+     * @param s3 the S3 instance definition to probe
+     * @throws DiscoveryException if no connection can be established
+     */
+    private void verifyConnection(final S3TO s3) throws DiscoveryException {
+
+        if (!canConnect(s3)) {
+            // SECURITY FIX: the secret key was previously interpolated into
+            // this message, leaking credentials into logs and API responses.
+            throw new DiscoveryException(format("Unable to connect to S3 "
+                    + "using access key %1$s and endpoint %2$s",
+                    s3.getAccessKey(),
+                    s3.getEndPoint() != null ? s3.getEndPoint() : "default"));
+        }
+
+    }
+
+    /**
+     * Validates the configured bucket, failing discovery with the collected
+     * error messages when any check fails.
+     */
+    private void verifyBuckets(S3TO s3) throws DiscoveryException {
+        final List<String> problems = new ArrayList<String>(
+                verifyBucket(s3, s3.getBucketName()));
+        throwDiscoveryExceptionFromErrorMessages(problems);
+    }
+
+    /**
+     * Checks that a bucket exists and is readable/writable.
+     *
+     * @param clientOptions connection options for the S3 endpoint
+     * @param bucketName the bucket to probe
+     * @return a list of error messages; empty when the bucket is usable
+     */
+    private List<String> verifyBucket(final ClientOptions clientOptions,
+            final String bucketName) {
+
+        if (!doesBucketExist(clientOptions, bucketName)) {
+            return singletonList(format("Bucket %1$s does not exist.",
+                    bucketName));
+        }
+
+        if (!canReadWriteBucket(clientOptions, bucketName)) {
+            // BUG FIX: the message previously read "Can read/write from
+            // bucket" -- the opposite of the condition being reported.
+            return singletonList(format("Unable to read/write bucket %1$s.",
+                    bucketName));
+        }
+
+        return emptyList();
+    }
+
+    /**
+     * Validates the client options and bucket name of a candidate S3
+     * definition, throwing when any field is invalid.
+     */
+    private void validateFields(final S3VO s3VO) {
+        final List<String> problems = new ArrayList<String>();
+        problems.addAll(checkClientOptions(s3VO.toS3TO()));
+        problems.addAll(checkBucketName("template", s3VO.getBucketName()));
+        throwDiscoveryExceptionFromErrorMessages(problems);
+    }
+
+    /**
+     * Ensures that S3 support is enabled and that no instance has already
+     * been defined (only a single S3 instance is supported).
+     */
+    private void enforceS3PreConditions() throws DiscoveryException {
+        if (!isS3Enabled()) {
+            throw new DiscoveryException("S3 is not enabled.");
+        }
+        if (getS3TO() != null) {
+            throw new DiscoveryException("Attempt to define multiple S3 "
+                    + "instances.  Only one instance definition is supported.");
+        }
+    }
+
+    // Joins any accumulated error messages into a single failure.
+    // NOTE(review): despite the name, this throws the unchecked
+    // CloudRuntimeException rather than DiscoveryException.  Callers such as
+    // validateFields do not declare the checked DiscoveryException, so
+    // changing the thrown type would ripple through call sites -- confirm
+    // which exception type is actually intended.
+    private void throwDiscoveryExceptionFromErrorMessages(
+            final List<String> errorMessages) {
+
+        if (!errorMessages.isEmpty()) {
+            throw new CloudRuntimeException(join(errorMessages, " "));
+        }
+
+    }
+
+    /**
+     * Builds the global lock id used to serialize S3 operations on a
+     * template, e.g. {@code S3_TEMPLATE_<accountId>_<templateId>}.
+     * The spurious {@code @SuppressWarnings("unchecked")} has been removed:
+     * no unchecked operation occurs in this method.
+     */
+    private String determineLockId(final long accountId, final long templateId) {
+
+        // TBD The lock scope may be too coarse grained. Deletes need to lock
+        // the template across all zones where upload and download could
+        // probably safely scoped to the zone ...
+        return join(asList("S3_TEMPLATE", accountId, templateId), "_");
+
+    }
+
+    /** Looks up the transfer object for the S3 instance with the given id. */
+    @Override
+    public S3TO getS3TO(final Long s3Id) {
+        return s3Dao.getS3TO(s3Id);
+    }
+
+    /**
+     * Returns the sole configured S3 instance as a transfer object.
+     *
+     * @return the S3 definition, or null when none is configured
+     * @throws CloudRuntimeException if more than one instance is defined
+     */
+    @Override
+    public S3TO getS3TO() {
+
+        final List<S3VO> s3s = this.s3Dao.listAll();
+
+        // Simplified: the second operand's "s3s != null &&" was redundant
+        // after the short-circuiting null test.
+        if (s3s == null || s3s.isEmpty()) {
+            return null;
+        }
+
+        if (s3s.size() == 1) {
+            return s3s.get(0).toS3TO();
+        }
+
+        throw new CloudRuntimeException("Multiple S3 instances have been "
+                + "defined.  Only one instance configuration is supported.");
+
+    }
+
+    /**
+     * Registers the S3 instance described by the passed command as the
+     * system's object store.  Field validation, connectivity, and bucket
+     * access are all verified before anything is persisted.
+     *
+     * @param addS3Cmd user-supplied connection and bucket parameters
+     * @return the persisted S3 definition
+     * @throws DiscoveryException if S3 is disabled, an instance is already
+     *             defined, or the instance cannot be connected to/verified
+     */
+    @Override
+    public S3 addS3(final AddS3Cmd addS3Cmd) throws DiscoveryException {
+
+        this.enforceS3PreConditions();
+
+        final S3VO s3VO = new S3VO(UUID.randomUUID().toString(),
+                addS3Cmd.getAccessKey(), addS3Cmd.getSecretKey(),
+                addS3Cmd.getEndPoint(), addS3Cmd.getBucketName(),
+                addS3Cmd.getHttpsFlag(), addS3Cmd.getConnectionTimeout(),
+                addS3Cmd.getMaxErrorRetry(), addS3Cmd.getSocketTimeout(), now());
+
+        this.validateFields(s3VO);
+
+        final S3TO s3 = s3VO.toS3TO();
+        this.verifyConnection(s3);
+        this.verifyBuckets(s3);
+
+        return this.s3Dao.persist(s3VO);
+
+    }
+
+    /** @return true when the global S3Enable configuration flag is set */
+    @Override
+    public boolean isS3Enabled() {
+        final String flag = configurationDao.getValue(Config.S3Enable.key());
+        return Boolean.valueOf(flag);
+    }
+
+    /**
+     * Not yet implemented; always throws
+     * {@link UnsupportedOperationException}.
+     */
+    @Override
+    public boolean isTemplateInstalled(final Long templateId) {
+        throw new UnsupportedOperationException(
+                "S3Manager#isTemplateInstalled (DeleteIsoCmd) has not yet "
+                        + "been implemented");
+    }
+
+    /**
+     * Deletes a template from the configured S3 object store and, on success,
+     * removes its template/S3 association row.  Fails fast when no S3
+     * instance is configured or the template is not tracked in S3.
+     *
+     * @throws CloudRuntimeException on any configuration, lookup, or deletion
+     *             failure
+     */
+    @Override
+    public void deleteTemplate(final Long templateId, final Long accountId) {
+
+        final S3TO s3 = getS3TO();
+
+        if (s3 == null) {
+            final String errorMessage = "Delete Template Failed: No S3 configuration defined.";
+            LOGGER.error(errorMessage);
+            throw new CloudRuntimeException(errorMessage);
+        }
+
+        final VMTemplateS3VO vmTemplateS3VO = vmTemplateS3Dao
+                .findOneByS3Template(s3.getId(), templateId);
+        if (vmTemplateS3VO == null) {
+            final String errorMessage = format(
+                    "Delete Template Failed: Unable to find Template %1$s in S3.",
+                    templateId);
+            LOGGER.error(errorMessage);
+            throw new CloudRuntimeException(errorMessage);
+        }
+
+        try {
+
+            // Serialize with concurrent uploads/downloads of this template.
+            executeWithNoWaitLock(determineLockId(accountId, templateId),
+                    new Callable<Void>() {
+
+                        @Override
+                        public Void call() throws Exception {
+
+                            final Answer answer = agentManager.sendToSSVM(null,
+                                    new DeleteTemplateFromS3Command(s3,
+                                            accountId, templateId));
+                            if (answer == null || !answer.getResult()) {
+                                final String errorMessage = format(
+                                        "Delete Template Failed: Unable to delete template id %1$s from S3 due to following error: %2$s",
+                                        templateId,
+                                        ((answer == null) ? "answer is null"
+                                                : answer.getDetails()));
+                                LOGGER.error(errorMessage);
+                                throw new CloudRuntimeException(errorMessage);
+                            }
+
+                            // Only drop the association once the SSVM has
+                            // confirmed the S3 delete succeeded.
+                            vmTemplateS3Dao.remove(vmTemplateS3VO.getId());
+                            LOGGER.debug(format(
+                                    "Deleted template %1$s from S3.",
+                                    templateId));
+
+                            return null;
+
+                        }
+
+                    });
+
+        } catch (Exception e) {
+
+            final String errorMessage = format(
+                    "Delete Template Failed: Unable to delete template id %1$s from S3 due to the following error: %2$s.",
+                    templateId, e.getMessage());
+            LOGGER.error(errorMessage);
+            throw new CloudRuntimeException(errorMessage, e);
+
+        }
+
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public String downloadTemplateFromS3ToSecondaryStorage(
+            final long dataCenterId, final long templateId,
+            final int primaryStorageDownloadWait) {
+
+        if (!isS3Enabled()) {
+            return null;
+        }
+
+        final VMTemplateVO template = vmTemplateDao.findById(templateId);
+        if (template == null) {
+            final String errorMessage = String
+                    .format("Failed to download template id %1$s from S3 because the template definition was not found.",
+                            templateId);
+            LOGGER.error(errorMessage);
+            return errorMessage;
+        }
+
+        final VMTemplateS3VO templateS3VO = findByTemplateId(templateId);
+        if (templateS3VO == null) {
+            final String errorMessage = format(
+                    "Failed to download template id %1$s from S3 because it does not exist in S3.",
+                    templateId);
+            LOGGER.error(errorMessage);
+            return errorMessage;
+        }
+
+        final S3TO s3 = getS3TO(templateS3VO.getS3Id());
+        if (s3 == null) {
+            final String errorMessage = format(
+                    "Failed to download template id %1$s from S3 because S3 id %2$s does not exist.",
+                    templateId, templateS3VO);
+            LOGGER.error(errorMessage);
+            return errorMessage;
+        }
+
+        final HostVO secondaryStorageHost = secondaryStorageVMManager
+                .findSecondaryStorageHost(dataCenterId);
+        if (secondaryStorageHost == null) {
+            final String errorMessage = format(
+                    "Unable to find secondary storage host for zone id %1$s.",
+                    dataCenterId);
+            LOGGER.error(errorMessage);
+            throw new CloudRuntimeException(errorMessage);
+        }
+
+        final long accountId = template.getAccountId();
+        final DownloadTemplateFromS3ToSecondaryStorageCommand cmd = new DownloadTemplateFromS3ToSecondaryStorageCommand(
+                s3, accountId, templateId, secondaryStorageHost.getName(),
+                primaryStorageDownloadWait);
+
+        try {
+
+            executeWithNoWaitLock(determineLockId(accountId, templateId),
+                    new Callable<Void>() {
+
+                        @Override
+                        public Void call() throws Exception {
+
+                            final Answer answer = agentManager.sendToSSVM(
+                                    dataCenterId, cmd);
+
+                            if (answer == null || !answer.getResult()) {
+                                final String errMsg = String
+                                        .format("Failed to download template from S3 to secondary storage due to %1$s",
+                                                (answer == null ? "answer is null"
+                                                        : answer.getDetails()));
+                                LOGGER.error(errMsg);
+                                throw new CloudRuntimeException(errMsg);
+                            }
+
+                            final String installPath = join(
+                                    asList("template", "tmpl", accountId,
+                                            templateId), File.separator);
+                            final VMTemplateHostVO tmpltHost = new VMTemplateHostVO(
+                                    secondaryStorageHost.getId(), templateId,
+                                    now(), 100, Status.DOWNLOADED, null, null,
+                                    null, installPath, template.getUrl());
+                            tmpltHost.setSize(templateS3VO.getSize());
+                            tmpltHost.setPhysicalSize(templateS3VO
+                                    .getPhysicalSize());
+                            vmTemplateHostDao.persist(tmpltHost);
+
+                            return null;
+
+                        }
+
+                    });
+
+        } catch (Exception e) {
+            final String errMsg = "Failed to download template from S3 to secondary storage due to "
+                    + e.toString();
+            LOGGER.error(errMsg);
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        return null;
+
+    }
+
+    /**
+     * Lists configured S3 instances, optionally restricted to a single id,
+     * paged according to the command's start index and page size.
+     */
+    @Override
+    public List<S3VO> listS3s(final ListS3sCmd cmd) {
+
+        final Filter pageFilter = new Filter(S3VO.class, ID_COLUMN_NAME, TRUE,
+                cmd.getStartIndex(), cmd.getPageSizeVal());
+
+        final SearchCriteria<S3VO> criteria = s3Dao.createSearchCriteria();
+        if (cmd.getId() != null) {
+            criteria.addAnd(ID_COLUMN_NAME, EQ, cmd.getId());
+        }
+
+        return s3Dao.search(criteria, pageFilter);
+
+    }
+
+    /**
+     * Not yet implemented; always throws
+     * {@link UnsupportedOperationException}.  Note that
+     * downloadTemplateFromS3ToSecondaryStorage depends on this lookup.
+     */
+    @Override
+    public VMTemplateS3VO findByTemplateId(final Long templateId) {
+        throw new UnsupportedOperationException(
+                "S3Manager#findByTemplateId(Long) has not yet "
+                        + "been implemented");
+    }
+
+    /**
+     * Registers every template tracked in S3 with the passed zone so its
+     * secondary storage can be populated.  No-op when S3 is disabled.
+     */
+    @Override
+    public void propagateTemplatesToZone(final DataCenterVO zone) {
+
+        if (!isS3Enabled()) {
+            return;
+        }
+
+        final List<VMTemplateS3VO> s3VMTemplateRefs = this.vmTemplateS3Dao
+                .listAll();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(format("Propagating %1$s templates to zone %2$s.",
+                    s3VMTemplateRefs.size(), zone.getName()));
+        }
+
+        // One template/zone association row per tracked template.
+        for (final VMTemplateS3VO templateS3VO : s3VMTemplateRefs) {
+            this.vmTemplateZoneDao.persist(new VMTemplateZoneVO(zone.getId(),
+                    templateS3VO.getTemplateId(), now()));
+        }
+
+    }
+
+    /**
+     * Stores the component name; no other configuration is required.
+     *
+     * @return always true
+     */
+    @Override
+    public boolean configure(final String name, final Map<String, Object> params)
+            throws ConfigurationException {
+
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(format("Configuring S3 Manager %1$s", name));
+        }
+        this.name = name;
+        return true;
+    }
+
+    /** Lifecycle hook; nothing to start beyond logging. */
+    @Override
+    public boolean start() {
+        LOGGER.info("Starting S3 Manager");
+        return true;
+    }
+
+    /** Lifecycle hook; nothing to stop beyond logging. */
+    @Override
+    public boolean stop() {
+        LOGGER.info("Stopping S3 Manager");
+        return true;
+    }
+
+    /** @return the component name assigned during configuration */
+    @Override
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * Registers an S3-backed template with every zone.  No-op when S3 is
+     * disabled or unconfigured.
+     */
+    @Override
+    public void propagateTemplateToAllZones(final VMTemplateS3VO vmTemplateS3VO) {
+
+        // BUG FIX: previously used vmTemplateS3VO.getId() -- the id of the
+        // template/S3 association row -- where the template id is required
+        // (cf. propagateTemplatesToZone, which uses getTemplateId()).
+        final long templateId = vmTemplateS3VO.getTemplateId();
+
+        if (!isS3Enabled()) {
+            if (LOGGER.isTraceEnabled()) {
+                // Typo fix: "propogate" -> "propagate".
+                LOGGER.trace(format(
+                        "Attempt to propagate template id %1$s across all zones.  However, S3 is not enabled.",
+                        templateId));
+            }
+            return;
+        }
+
+        final S3TO s3 = getS3TO();
+
+        if (s3 == null) {
+            LOGGER.warn(format(
+                    "Unable to propagate template id %1$s across all zones because S3 is enabled, but not configured.",
+                    templateId));
+            return;
+        }
+
+        // The former "vmTemplateS3VO != null" guard was removed: the argument
+        // had already been dereferenced above, so it could never be false.
+        final List<DataCenterVO> dataCenters = dataCenterDao.listAll();
+        for (DataCenterVO dataCenter : dataCenters) {
+            final VMTemplateZoneVO tmpltZoneVO = new VMTemplateZoneVO(
+                    dataCenter.getId(), templateId, now());
+            vmTemplateZoneDao.persist(tmpltZoneVO);
+        }
+
+    }
+
+    /**
+     * Chooses a zone from which a template can be extracted: preferably a
+     * zone whose secondary storage already holds the template, otherwise a
+     * randomly selected zone.
+     *
+     * @return the chosen zone id, or null when no S3 instance is configured
+     */
+    @Override
+    public Long chooseZoneForTemplateExtract(VMTemplateVO template) {
+
+        final S3TO s3 = getS3TO();
+
+        if (s3 == null) {
+            return null;
+        }
+
+        final List<VMTemplateHostVO> templateHosts = vmTemplateHostDao
+                .listByOnlyTemplateId(template.getId());
+        if (templateHosts != null) {
+            shuffle(templateHosts);
+            // NOTE(review): this loop can only ever examine the first
+            // shuffled entry -- it returns or throws on iteration one.
+            // Confirm whether the intent was to skip entries whose host
+            // lookup fails rather than abort immediately.
+            for (VMTemplateHostVO vmTemplateHostVO : templateHosts) {
+                final HostVO host = hostDao.findById(vmTemplateHostVO
+                        .getHostId());
+                if (host != null) {
+                    return host.getDataCenterId();
+                }
+                throw new CloudRuntimeException(
+                        format("Unable to find secondary storage host for template id %1$s.",
+                                template.getId()));
+            }
+        }
+
+        // NOTE(review): get(0) throws IndexOutOfBoundsException when no
+        // zones exist -- confirm that at least one zone is guaranteed here.
+        final List<DataCenterVO> dataCenters = dataCenterDao.listAll();
+        shuffle(dataCenters);
+        return dataCenters.get(0).getId();
+
+    }
+
+    /**
+     * Uploads a template from its secondary storage host to S3, records the
+     * template/S3 association, and propagates the template to all zones.
+     * No-op when S3 is disabled; logs and returns when S3 is enabled but not
+     * configured.
+     *
+     * @throws CloudRuntimeException when the template has no host attachment
+     *             or the secondary storage host cannot be found
+     */
+    @Override
+    public void uploadTemplateToS3FromSecondaryStorage(
+            final VMTemplateVO template) {
+
+        final Long templateId = template.getId();
+
+        final List<VMTemplateHostVO> templateHostRefs = vmTemplateHostDao
+                .listByTemplateId(templateId);
+
+        // Simplified: "templateHostRefs != null &&" was redundant after the
+        // short-circuiting null test.
+        if (templateHostRefs == null || templateHostRefs.isEmpty()) {
+            throw new CloudRuntimeException(
+                    format("Attempt to sync template id %1$s that is not attached to a host.",
+                            templateId));
+        }
+
+        final VMTemplateHostVO templateHostRef = templateHostRefs.get(0);
+
+        if (!isS3Enabled()) {
+            return;
+        }
+
+        final S3TO s3 = getS3TO();
+        if (s3 == null) {
+            LOGGER.warn("S3 Template Sync Failed: Attempt to sync templates with S3, but no S3 instance defined.");
+            return;
+        }
+
+        final HostVO secondaryHost = this.hostDao.findById(templateHostRef
+                .getHostId());
+        if (secondaryHost == null) {
+            throw new CloudRuntimeException(format(
+                    "Unable to find secondary storage host id %1$s.",
+                    templateHostRef.getHostId()));
+        }
+
+        final Long dataCenterId = secondaryHost.getDataCenterId();
+        final Long accountId = template.getAccountId();
+
+        try {
+
+            // Serialize with concurrent deletes/downloads of this template.
+            executeWithNoWaitLock(determineLockId(accountId, templateId),
+                    new Callable<Void>() {
+
+                        @Override
+                        public Void call() throws Exception {
+
+                            final UploadTemplateToS3FromSecondaryStorageCommand cmd = new UploadTemplateToS3FromSecondaryStorageCommand(
+                                    s3, secondaryHost.getStorageUrl(),
+                                    dataCenterId, accountId, templateId);
+
+                            final Answer answer = agentManager.sendToSSVM(
+                                    dataCenterId, cmd);
+                            if (answer == null || !answer.getResult()) {
+
+                                final String reason = answer != null ? answer
+                                        .getDetails()
+                                        : "S3 template sync failed due to an unspecified error.";
+                                throw new CloudRuntimeException(
+                                        format("Failed to upload template id %1$s to S3 from secondary storage due to %2$s.",
+                                                templateId, reason));
+
+                            }
+
+                            if (LOGGER.isDebugEnabled()) {
+                                // BUG FIX: "%1s" treated the 1 as a field
+                                // width, not an argument index; "%1$s" is the
+                                // intended positional reference.
+                                LOGGER.debug(format(
+                                        "Creating VMTemplateS3VO instance using template id %1$s.",
+                                        templateId));
+                            }
+
+                            final VMTemplateS3VO vmTemplateS3VO = new VMTemplateS3VO(
+                                    s3.getId(), templateId, now(),
+                                    templateHostRef.getSize(), templateHostRef
+                                            .getPhysicalSize());
+
+                            if (LOGGER.isDebugEnabled()) {
+                                LOGGER.debug(format("Persisting %1$s",
+                                        vmTemplateS3VO));
+                            }
+
+                            vmTemplateS3Dao.persist(vmTemplateS3VO);
+                            propagateTemplateToAllZones(vmTemplateS3VO);
+
+                            return null;
+
+                        }
+
+                    });
+
+        } catch (Exception e) {
+
+            // NOTE(review): failures here are logged but deliberately not
+            // rethrown (unlike deleteTemplate) -- presumably the periodic
+            // sync retries the upload later.  Confirm this is intended.
+            final String errorMessage = format(
+                    "Failed to upload template id %1$s for zone id %2$s to S3.",
+                    templateId, dataCenterId);
+            LOGGER.error(errorMessage, e);
+
+        }
+
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/server/src/com/cloud/storage/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/com/cloud/storage/snapshot/SnapshotManager.java
index a10298e..a7692de 100755
--- a/server/src/com/cloud/storage/snapshot/SnapshotManager.java
+++ b/server/src/com/cloud/storage/snapshot/SnapshotManager.java
@@ -128,6 +128,8 @@ public interface SnapshotManager {
 
     void downloadSnapshotsFromSwift(SnapshotVO ss);
 
+    void downloadSnapshotsFromS3(SnapshotVO snapshot);
+
     HostVO getSecondaryStorageHost(SnapshotVO snapshot);
 
     String getSecondaryStorageURL(SnapshotVO snapshot);

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java
index 32e37e6..259feab 100755
--- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java
+++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java
@@ -17,6 +17,7 @@
 package com.cloud.storage.snapshot;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
@@ -34,9 +35,11 @@ import com.cloud.agent.api.BackupSnapshotCommand;
 import com.cloud.agent.api.Command;
 import com.cloud.agent.api.DeleteSnapshotBackupCommand;
 import com.cloud.agent.api.DeleteSnapshotsDirCommand;
+import com.cloud.agent.api.DownloadSnapshotFromS3Command;
 import com.cloud.agent.api.ManageSnapshotAnswer;
 import com.cloud.agent.api.ManageSnapshotCommand;
 import com.cloud.agent.api.downloadSnapshotFromSwiftCommand;
+import com.cloud.agent.api.to.S3TO;
 import com.cloud.agent.api.to.SwiftTO;
 import com.cloud.alert.AlertManager;
 import com.cloud.api.commands.CreateSnapshotPolicyCmd;
@@ -91,6 +94,7 @@ import com.cloud.storage.dao.SnapshotScheduleDao;
 import com.cloud.storage.dao.StoragePoolDao;
 import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.s3.S3Manager;
 import com.cloud.storage.secondary.SecondaryStorageVmManager;
 import com.cloud.storage.swift.SwiftManager;
 import com.cloud.tags.ResourceTagVO;
@@ -171,6 +175,8 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
     private ResourceLimitService _resourceLimitMgr;
     @Inject
     private SwiftManager _swiftMgr;
+    @Inject
+    private S3Manager _s3Mgr;
     @Inject 
     private SecondaryStorageVmManager _ssvmMgr;
     @Inject
@@ -477,11 +483,25 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
         return createdSnapshot;
     }
 
+    private static void checkObjectStorageConfiguration(SwiftTO swift, S3TO s3) {
+
+        if (swift != null && s3 != null) {
+            throw new CloudRuntimeException(
+                    "Swift and S3 are not simultaneously supported for snapshot backup.");
+        }
+
+    }
 
     @Override
     public void deleteSnapshotsForVolume (String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId ){
         SwiftTO swift = _swiftMgr.getSwiftTO();
-        DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(swift, secondaryStoragePoolUrl, dcId, accountId, volumeId, null, true);
+        S3TO s3 = _s3Mgr.getS3TO();
+
+        checkObjectStorageConfiguration(swift, s3);
+
+        DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(
+                swift, s3, secondaryStoragePoolUrl, dcId, accountId, volumeId,
+                null, true);
         try {
             Answer ans = _agentMgr.sendToSSVM(dcId, cmd);
             if ( ans == null || !ans.getResult() ) {
@@ -543,6 +563,54 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
         
     }
 
+    /**
+     * Builds the chain of backup uuids for a snapshot, oldest first: walks
+     * the prevSnapshotId links back to the initial full snapshot, prepending
+     * each backup id along the way.
+     *
+     * @return an unmodifiable list of backup uuids, oldest to newest
+     */
+    private List<String> determineBackupUuids(final SnapshotVO snapshot) {
+
+        final List<String> backupUuids = new ArrayList<String>();
+        backupUuids.add(0, snapshot.getBackupSnapshotId());
+
+        // NOTE(review): assumes every prevSnapshotId resolves to a persisted
+        // row; a dangling reference would NPE on the next loop test. Confirm.
+        SnapshotVO tempSnapshot = snapshot;
+        while (tempSnapshot.getPrevSnapshotId() != 0) {
+            tempSnapshot = _snapshotDao.findById(tempSnapshot
+                    .getPrevSnapshotId());
+            backupUuids.add(0, tempSnapshot.getBackupSnapshotId());
+        }
+
+        return Collections.unmodifiableList(backupUuids);
+    }
+
+    /**
+     * Restores a snapshot chain from S3 to the zone's secondary storage,
+     * downloading from the oldest full snapshot forward so each delta is
+     * applied on top of its parent.
+     *
+     * @throws CloudRuntimeException when any download in the chain fails
+     */
+    @Override
+    public void downloadSnapshotsFromS3(final SnapshotVO snapshot) {
+
+        final VolumeVO volume = _volsDao.findById(snapshot.getVolumeId());
+        final Long zoneId = volume.getDataCenterId();
+        final HostVO secHost = _storageMgr.getSecondaryStorageHost(zoneId);
+
+        final S3TO s3 = _s3Mgr.getS3TO(snapshot.getS3Id());
+        final List<String> backupUuids = determineBackupUuids(snapshot);
+
+        try {
+            // Each download names the previously restored uuid as its parent.
+            String parent = null;
+            for (final String backupUuid : backupUuids) {
+                final DownloadSnapshotFromS3Command cmd = new DownloadSnapshotFromS3Command(
+                        s3, parent, secHost.getStorageUrl(), zoneId,
+                        volume.getAccountId(), volume.getId(), backupUuid,
+                        _backupsnapshotwait);
+                final Answer answer = _agentMgr.sendToSSVM(zoneId, cmd);
+                if ((answer == null) || !answer.getResult()) {
+                    throw new CloudRuntimeException(String.format(
+                            "S3 snapshot download failed due to %1$s.",
+                            answer != null ? answer.getDetails()
+                                    : "unspecified error"));
+                }
+                parent = backupUuid;
+            }
+        } catch (Exception e) {
+            // NOTE(review): a CloudRuntimeException thrown in the loop above
+            // is re-caught and re-wrapped here, doubling its message --
+            // confirm whether it should propagate unchanged instead.
+            throw new CloudRuntimeException(
+                    "Snapshot download from S3 failed due to " + e.toString(),
+                    e);
+        }
+
+    }
 
     @Override
     @DB
@@ -577,6 +645,9 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
 
 
             SwiftTO swift = _swiftMgr.getSwiftTO();
+            S3TO s3 = _s3Mgr.getS3TO();
+
+            checkObjectStorageConfiguration(swift, s3);
             
             long prevSnapshotId = snapshot.getPrevSnapshotId();
             if (prevSnapshotId > 0) {
@@ -586,7 +657,8 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
                         prevBackupUuid = prevSnapshot.getBackupSnapshotId();
                         prevSnapshotUuid = prevSnapshot.getPath();
                     }
-                } else if ( prevSnapshot.getSwiftId() != null && swift != null ) {
+                } else if ((prevSnapshot.getSwiftId() != null && swift != null)
+                        || (prevSnapshot.getS3Id() != null && s3 != null)) {
                     prevBackupUuid = prevSnapshot.getBackupSnapshotId();
                     prevSnapshotUuid = prevSnapshot.getPath();
                 }
@@ -599,8 +671,10 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
 
             if ( swift != null ) {
                 backupSnapshotCommand.setSwift(swift);
+            } else if (s3 != null) {
+                backupSnapshotCommand.setS3(s3);
             }
-            
+
             String backedUpSnapshotUuid = null;
             // By default, assume failed.
             boolean backedUp = false;
@@ -621,6 +695,9 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
                 if (backupSnapshotCommand.getSwift() != null ) {
                     snapshot.setSwiftId(swift.getId());
                     snapshot.setBackupSnapshotId(backedUpSnapshotUuid);
+                } else if (backupSnapshotCommand.getS3() != null) {
+                    snapshot.setS3Id(s3.getId());
+                    snapshot.setBackupSnapshotId(backedUpSnapshotUuid);
                 } else {
                     snapshot.setSecHostId(secHost.getId());
                     snapshot.setBackupSnapshotId(backedUpSnapshotUuid);
@@ -832,7 +909,13 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
             return true;
         }
         SwiftTO swift = _swiftMgr.getSwiftTO(snapshot.getSwiftId());
-        DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(swift, secondaryStoragePoolUrl, dcId, accountId, volumeId, backupOfSnapshot, false);
+        S3TO s3 = _s3Mgr.getS3TO();
+
+        checkObjectStorageConfiguration(swift, s3);
+
+        DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(
+                swift, s3, secondaryStoragePoolUrl, dcId, accountId, volumeId,
+                backupOfSnapshot, false);
         Answer answer = _agentMgr.sendToSSVM(dcId, cmd);
 
         if ((answer != null) && answer.getResult()) {
@@ -979,9 +1062,15 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
             }
             List<HostVO> ssHosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(dcId);
             SwiftTO swift = _swiftMgr.getSwiftTO();
-            if (swift == null) {
+            S3TO s3 = _s3Mgr.getS3TO();
+
+            checkObjectStorageConfiguration(swift, s3);
+
+            if (swift == null && s3 == null) {
                 for (HostVO ssHost : ssHosts) {
-                    DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(null, ssHost.getStorageUrl(), dcId, accountId, volumeId, "", true);
+                    DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(
+                            null, null, ssHost.getStorageUrl(), dcId,
+                            accountId, volumeId, "", true);
                     Answer answer = null;
                     try {
                         answer = _agentMgr.sendToSSVM(dcId, cmd);
@@ -998,12 +1087,14 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
                     }
                 }
             } else {
-                DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(swift, "", dcId, accountId, volumeId, "", true);
+                DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand(
+                        swift, s3, "", dcId, accountId, volumeId, "", true);
                 Answer answer = null;
                 try {
                     answer = _agentMgr.sendToSSVM(dcId, cmd);
                 } catch (Exception e) {
-                    s_logger.warn("Failed to delete all snapshot for volume " + volumeId + " on swift");
+                    final String storeType = s3 != null ? "S3" : "swift";
+                    s_logger.warn("Failed to delete all snapshot for volume " + volumeId + " on " + storeType);
                 }
                 if ((answer != null) && answer.getResult()) {
                     s_logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId);

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/server/src/com/cloud/template/S3SyncTask.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/template/S3SyncTask.java b/server/src/com/cloud/template/S3SyncTask.java
new file mode 100644
index 0000000..ed179dc
--- /dev/null
+++ b/server/src/com/cloud/template/S3SyncTask.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.template;
+
+import static java.lang.String.*;
+
+import java.util.List;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.to.S3TO;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.s3.S3Manager;
+
+/**
+ * Periodic background task that pushes templates from NFS-backed secondary
+ * storage up to the configured S3 object store.  Scheduled by
+ * TemplateManagerImpl on a single-threaded ScheduledExecutorService; every
+ * exception is caught and logged so one failed cycle never cancels the
+ * recurring schedule.
+ */
+final class S3SyncTask implements Runnable {
+
+    private static final Logger LOGGER = Logger.getLogger(S3SyncTask.class);
+
+    // DAO used to find templates not yet mirrored to S3.
+    private final VMTemplateDao vmTemplateDao;
+    // Manager performing the actual per-template upload.
+    private final S3Manager s3Mgr;
+
+    /**
+     * @param vmTemplateDao source of templates pending S3 sync; must not be null
+     * @param s3Mgr         S3 manager used for configuration lookup and uploads;
+     *                      must not be null
+     */
+    S3SyncTask(final VMTemplateDao vmTemplateDao, final S3Manager s3Mgr) {
+
+        super();
+
+        // NOTE(review): asserts are no-ops unless the JVM runs with -ea;
+        // consider explicit null checks if these must always hold.
+        assert vmTemplateDao != null;
+        assert s3Mgr != null;
+
+        this.vmTemplateDao = vmTemplateDao;
+        this.s3Mgr = s3Mgr;
+
+    }
+
+    @Override
+    public void run() {
+
+        try {
+
+            final S3TO s3 = s3Mgr.getS3TO();
+
+            // No S3 instance configured: nothing to do this cycle.
+            if (s3 == null) {
+                LOGGER.warn("S3 sync skipped because no S3 instance is configured.");
+                return;
+            }
+
+            final List<VMTemplateVO> candidateTemplates = vmTemplateDao
+                    .findTemplatesToSyncToS3();
+
+            if (candidateTemplates.isEmpty()) {
+                LOGGER.debug("All templates are synced with S3.");
+                return;
+            }
+
+            // Upload sequentially; a failure on one template aborts the
+            // remainder of this cycle (retried on the next scheduled run).
+            for (VMTemplateVO candidateTemplate : candidateTemplates) {
+
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(format(
+                            "Uploading template %1$s (id: %2$s) to S3.",
+                            candidateTemplate.getName(),
+                            candidateTemplate.getId()));
+                }
+
+                s3Mgr.uploadTemplateToS3FromSecondaryStorage(candidateTemplate);
+
+            }
+
+            LOGGER.debug("Completed S3 template sync task.");
+
+        } catch (Exception e) {
+            // Swallow deliberately: a thrown exception would cancel the
+            // recurring schedule in ScheduledExecutorService.
+            LOGGER.warn(
+                    "S3 Sync Task ignored exception, and will continue to execute.",
+                    e);
+        }
+
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/server/src/com/cloud/template/TemplateManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java
index 1e87de2..82c31f1 100755
--- a/server/src/com/cloud/template/TemplateManagerImpl.java
+++ b/server/src/com/cloud/template/TemplateManagerImpl.java
@@ -113,10 +113,12 @@ import com.cloud.storage.dao.UploadDao;
 import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VMTemplateHostDao;
 import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.storage.dao.VMTemplateS3Dao;
 import com.cloud.storage.dao.VMTemplateSwiftDao;
 import com.cloud.storage.dao.VMTemplateZoneDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.download.DownloadMonitor;
+import com.cloud.storage.s3.S3Manager;
 import com.cloud.storage.secondary.SecondaryStorageVmManager;
 import com.cloud.storage.swift.SwiftManager;
 import com.cloud.storage.upload.UploadMonitor;
@@ -179,8 +181,12 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
     @Inject
     SwiftManager _swiftMgr;
     @Inject
+    S3Manager _s3Mgr;
+    @Inject
     VMTemplateSwiftDao _tmpltSwiftDao;
     @Inject
+    VMTemplateS3Dao _vmS3TemplateDao;
+    @Inject
     ConfigurationDao _configDao;
     @Inject
     ClusterDao _clusterDao;
@@ -207,6 +213,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
     ExecutorService _preloadExecutor;
     ScheduledExecutorService _swiftTemplateSyncExecutor;
     
+    private ScheduledExecutorService _s3TemplateSyncExecutor = null;
 
     @Inject (adapter=TemplateAdapter.class)
     protected Adapters<TemplateAdapter> _adapters;
@@ -344,10 +351,14 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
             }
         }
         
-        if (zoneId == null) {
+        if (zoneId == null && _swiftMgr.isSwiftEnabled()) {
             zoneId = _swiftMgr.chooseZoneForTmpltExtract(templateId);
         }
 
+        if (zoneId == null && _s3Mgr.isS3Enabled()) {
+            zoneId = _s3Mgr.chooseZoneForTemplateExtract(template);
+        }
+
         if (_dcDao.findById(zoneId) == null) {
             throw new IllegalArgumentException("Please specify a valid zone.");
         }
@@ -380,7 +391,13 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
             if (swift != null && sservers != null) {
                 downloadTemplateFromSwiftToSecondaryStorage(zoneId, templateId);
             }
+        } else if (tmpltHostRef == null && _s3Mgr.isS3Enabled()) {
+            if (sservers != null) {
+                _s3Mgr.downloadTemplateFromS3ToSecondaryStorage(zoneId,
+                        templateId, _primaryStorageDownloadWait);
+            }
         }
+
         if (tmpltHostRef == null) {
             throw new InvalidParameterValueException("The " + desc + " has not been downloaded ");
         }
@@ -594,6 +611,12 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
                 s_logger.error("Unable to find a secondary storage host who has completely downloaded the template.");
                 return null;
             }
+            result = _s3Mgr.downloadTemplateFromS3ToSecondaryStorage(dcId,
+                    templateId, _primaryStorageDownloadWait);
+            if (result != null) {
+                s_logger.error("Unable to download template from S3 to secondary storage: " + result);
+                return null;
+            }
             templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool);
             if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) {
                 s_logger.error("Unable to find a secondary storage host who has completely downloaded the template.");
@@ -708,6 +731,12 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
                 s_logger.error("Unable to find a secondary storage host who has completely downloaded the template.");
                 return null;
             }
+            result = _s3Mgr.downloadTemplateFromS3ToSecondaryStorage(dcId,
+                    templateId, _primaryStorageDownloadWait);
+            if (result != null) {
+                s_logger.error("Unable to download template from S3 to secondary storage: " + result);
+                return null;
+            }
             templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool);
             if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) {
                 s_logger.error("Unable to find a secondary storage host who has completely downloaded the template.");
@@ -823,6 +852,12 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
         if (_swiftMgr.isSwiftEnabled()) {
             throw new CloudRuntimeException("copytemplate API is disabled in Swift setup, templates in Swift can be accessed by all Zones");
         }
+
+        if (_s3Mgr.isS3Enabled()) {
+            throw new CloudRuntimeException(
+                    "copytemplate API is disabled in S3 setup -- S3 templates are accessible in all zones.");
+        }
+
         //Verify parameters
         if (sourceZoneId == destZoneId) {
             throw new InvalidParameterValueException("Please specify different source and destination zones.");
@@ -1003,12 +1038,32 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
     @Override
     public boolean start() {
         _swiftTemplateSyncExecutor.scheduleAtFixedRate(getSwiftTemplateSyncTask(), 60, 60, TimeUnit.SECONDS);
+
+        if (_s3TemplateSyncExecutor != null) {
+
+            final int initialDelay = 60;
+            final int period = 60;
+
+            _s3TemplateSyncExecutor.scheduleAtFixedRate(new S3SyncTask(
+                    this._tmpltDao, this._s3Mgr), initialDelay, period,
+                    TimeUnit.SECONDS);
+            s_logger.info(String.format("Started S3 sync task to execute "
+                    + "every %1$s seconds after an initial delay of %2$s seconds.",
+                    period, initialDelay));
+
+        }
+
         return true;
     }
 
     @Override
     public boolean stop() {
         _swiftTemplateSyncExecutor.shutdownNow();
+
+        if (_s3TemplateSyncExecutor != null) {
+            _s3TemplateSyncExecutor.shutdownNow();
+        }
+
         return true;
     }
 
@@ -1041,7 +1096,16 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
         _storagePoolMaxWaitSeconds = NumbersUtil.parseInt(_configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
         _preloadExecutor = Executors.newFixedThreadPool(8, new NamedThreadFactory("Template-Preloader"));
         _swiftTemplateSyncExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("swift-template-sync-Executor"));
-        return false;
+
+        if (_s3Mgr.isS3Enabled()) {
+            _s3TemplateSyncExecutor = Executors
+                    .newSingleThreadScheduledExecutor(new NamedThreadFactory(
+                            "s3-template-sync"));
+        } else {
+            s_logger.info("S3 secondary storage synchronization is disabled.");
+        }
+
+      return false;
     }
     
     protected TemplateManagerImpl() {
@@ -1195,13 +1259,19 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
         if (cmd.getZoneId() == null && _swiftMgr.isSwiftEnabled()) {
             _swiftMgr.deleteTemplate(cmd);
         }
+        if (cmd.getZoneId() == null && _s3Mgr.isS3Enabled()) {
+            _s3Mgr.deleteTemplate(cmd.getId(), caller.getAccountId());
+        }
+
     	TemplateAdapter adapter = getAdapter(template.getHypervisorType());
     	TemplateProfile profile = adapter.prepareDelete(cmd);
     	boolean result = adapter.delete(profile);
     	
     	if (result){
-            if (cmd.getZoneId() == null && _swiftMgr.isSwiftEnabled()) {
-                List<VMTemplateZoneVO> templateZones = _tmpltZoneDao.listByZoneTemplate(null, templateId);
+            if (cmd.getZoneId() == null
+                    && (_swiftMgr.isSwiftEnabled() || _s3Mgr.isS3Enabled())) {
+                List<VMTemplateZoneVO> templateZones = _tmpltZoneDao
+                        .listByZoneTemplate(null, templateId);
                 if (templateZones != null) {
                     for (VMTemplateZoneVO templateZone : templateZones) {
                         _tmpltZoneDao.remove(templateZone.getId());
@@ -1234,6 +1304,10 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
         if (cmd.getZoneId() == null && _swiftMgr.isSwiftEnabled()) {
             _swiftMgr.deleteIso(cmd);
     	}
+        if (cmd.getZoneId() == null && _s3Mgr.isS3Enabled()) {
+            _s3Mgr.deleteTemplate(templateId, caller.getAccountId());
+        }
+
     	if (zoneId != null && (_ssvmMgr.findSecondaryStorageHost(zoneId) == null)) {
     		throw new InvalidParameterValueException("Failed to find a secondary storage host in the specified zone.");
     	}
@@ -1241,8 +1315,10 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
     	TemplateProfile profile = adapter.prepareDelete(cmd);
         boolean result = adapter.delete(profile);
         if (result) {
-            if (cmd.getZoneId() == null && _swiftMgr.isSwiftEnabled()) {
-                List<VMTemplateZoneVO> templateZones = _tmpltZoneDao.listByZoneTemplate(null, templateId);
+            if (cmd.getZoneId() == null
+                    && (_swiftMgr.isSwiftEnabled() || _s3Mgr.isS3Enabled())) {
+                List<VMTemplateZoneVO> templateZones = _tmpltZoneDao
+                        .listByZoneTemplate(null, templateId);
                 if (templateZones != null) {
                     for (VMTemplateZoneVO templateZone : templateZones) {
                         _tmpltZoneDao.remove(templateZone.getId());

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/setup/db/create-schema.sql
----------------------------------------------------------------------
diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql
index b0457d7..8010e3b 100755
--- a/setup/db/create-schema.sql
+++ b/setup/db/create-schema.sql
@@ -146,6 +146,8 @@ DROP TABLE IF EXISTS `cloud`.`s2s_vpn_gateway`;
 DROP TABLE IF EXISTS `cloud`.`s2s_vpn_connection`;
 DROP TABLE IF EXISTS `cloud`,`external_nicira_nvp_devices`;
 DROP TABLE IF EXISTS `cloud`,`nicira_nvp_nic_map`;
+DROP TABLE IF EXISTS `cloud`.`s3`;
+DROP TABLE IF EXISTS `cloud`.`template_s3_ref`;
 DROP TABLE IF EXISTS `cloud`,`nicira_nvp_router_map`;
 DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroup_policy_map`;
 DROP TABLE IF EXISTS `cloud`.`autoscale_policy_condition_map`;
@@ -164,7 +166,7 @@ CREATE TABLE `cloud`.`version` (
   INDEX `i_version__version`(`version`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
-INSERT INTO `version` (`version`, `updated`, `step`) VALUES('@VERSION@', now(), 'Complete');
+INSERT INTO `version` (`version`, `updated`, `step`) VALUES('@VERSION@', now(), 'Complete');
 
 CREATE TABLE `cloud`.`op_it_work` (
   `id` char(40) COMMENT 'reservation id',
@@ -480,12 +482,14 @@ CREATE TABLE `cloud`.`snapshots` (
   `removed` datetime COMMENT 'Date removed.  not null if removed',
   `backup_snap_id` varchar(255) COMMENT 'Back up uuid of the snapshot',
   `swift_id` bigint unsigned COMMENT 'which swift',
+  `s3_id` bigint unsigned COMMENT 'S3 to which this snapshot will be stored',
   `sechost_id` bigint unsigned COMMENT 'secondary storage host id',
   `prev_snap_id` bigint unsigned COMMENT 'Id of the most recent snapshot',
   `hypervisor_type` varchar(32) NOT NULL COMMENT 'hypervisor that the snapshot was taken under',
   `version` varchar(32) COMMENT 'snapshot version',
   PRIMARY KEY (`id`),
   CONSTRAINT `uc_snapshots__uuid` UNIQUE (`uuid`),
+  CONSTRAINT `fk_snapshots__s3_id` FOREIGN KEY `fk_snapshots__s3_id` (`s3_id`) REFERENCES `s3` (`id`),
   INDEX `i_snapshots__removed`(`removed`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
@@ -1850,6 +1854,36 @@ CREATE TABLE `cloud`.`swift` (
   CONSTRAINT `uc_swift__uuid` UNIQUE (`uuid`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
+CREATE TABLE `cloud`.`s3` (
+  `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
+  `uuid` varchar(40),
+  `access_key` varchar(20) NOT NULL COMMENT ' The S3 access key',
+  `secret_key` varchar(40) NOT NULL COMMENT ' The S3 secret key',
+  `end_point` varchar(1024) COMMENT ' The S3 host',
+  `bucket` varchar(63) NOT NULL COMMENT ' The S3 host',
+  `https` tinyint unsigned DEFAULT NULL COMMENT ' Flag indicating whether or not to connect over HTTPS',
+  `connection_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.',
+  `max_error_retry` integer  COMMENT ' The maximum number of retry attempts for failed retryable requests (ex: 5xx error responses from services).',
+  `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transfered over an established, open connection before the connection times out and is closed.',
+  `created` datetime COMMENT 'date the s3 first signed on',
+  PRIMARY KEY (`id`),
+  CONSTRAINT `uc_s3__uuid` UNIQUE (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `cloud`.`template_s3_ref` (
+  `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
+  `s3_id` bigint unsigned NOT NULL COMMENT ' Associated S3 instance id',
+  `template_id` bigint unsigned NOT NULL COMMENT ' Associated template id',
+  `created` DATETIME NOT NULL COMMENT ' The creation timestamp',
+  `size` bigint unsigned COMMENT ' The size of the object',
+  `physical_size` bigint unsigned DEFAULT 0 COMMENT ' The physical size of the object',
+  PRIMARY KEY (`id`),
+  CONSTRAINT `uc_template_s3_ref__template_id` UNIQUE (`template_id`),
+  CONSTRAINT `fk_template_s3_ref__s3_id` FOREIGN KEY `fk_template_s3_ref__s3_id` (`s3_id`) REFERENCES `s3` (`id`) ON DELETE CASCADE,
+  CONSTRAINT `fk_template_s3_ref__template_id` FOREIGN KEY `fk_template_s3_ref__template_id` (`template_id`) REFERENCES `vm_template` (`id`),
+  INDEX `i_template_s3_ref__s3_id`(`s3_id`),
+  INDEX `i_template_s3_ref__template_id`(`template_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
 CREATE TABLE `cloud`.`op_host_transfer` (
   `id` bigint unsigned UNIQUE NOT NULL COMMENT 'Id of the host',

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/setup/db/db/schema-40to410.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql
new file mode 100644
index 0000000..b0301d7
--- /dev/null
+++ b/setup/db/db/schema-40to410.sql
@@ -0,0 +1,58 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.0.0 to 4.1.0;
+--;
+
+CREATE TABLE `cloud`.`s3` (
+  `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
+  `uuid` varchar(40),
+  `access_key` varchar(20) NOT NULL COMMENT ' The S3 access key',
+  `secret_key` varchar(40) NOT NULL COMMENT ' The S3 secret key',
+  `end_point` varchar(1024) COMMENT ' The S3 host',
+  `bucket` varchar(63) NOT NULL COMMENT ' The S3 host',
+  `https` tinyint unsigned DEFAULT NULL COMMENT ' Flag indicating whether or not to connect over HTTPS',
+  `connection_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.',
+  `max_error_retry` integer  COMMENT ' The maximum number of retry attempts for failed retryable requests (ex: 5xx error responses from services).',
+  `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transfered over an established, open connection before the connection times out and is closed.',
+  `created` datetime COMMENT 'date the s3 first signed on',
+  PRIMARY KEY (`id`),
+  CONSTRAINT `uc_s3__uuid` UNIQUE (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `cloud`.`template_s3_ref` (
+  `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
+  `s3_id` bigint unsigned NOT NULL COMMENT ' Associated S3 instance id',
+  `template_id` bigint unsigned NOT NULL COMMENT ' Associated template id',
+  `created` DATETIME NOT NULL COMMENT ' The creation timestamp',
+  `size` bigint unsigned COMMENT ' The size of the object',
+  `physical_size` bigint unsigned DEFAULT 0 COMMENT ' The physical size of the object',
+  PRIMARY KEY (`id`),
+  CONSTRAINT `uc_template_s3_ref__template_id` UNIQUE (`template_id`),
+  CONSTRAINT `fk_template_s3_ref__s3_id` FOREIGN KEY `fk_template_s3_ref__s3_id` (`s3_id`) REFERENCES `s3` (`id`) ON DELETE CASCADE,
+  CONSTRAINT `fk_template_s3_ref__template_id` FOREIGN KEY `fk_template_s3_ref__template_id` (`template_id`) REFERENCES `vm_template` (`id`),
+  INDEX `i_template_s3_ref__s3_id`(`s3_id`),
+  INDEX `i_template_s3_ref__template_id`(`template_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.enable', 'false', 'enable s3');
+
+ALTER TABLE `cloud`.`snapshots` ADD COLUMN `s3_id` bigint unsigned COMMENT 'S3 to which this snapshot will be stored';
+
+ALTER TABLE `cloud`.`snapshots` ADD CONSTRAINT `fk_snapshots__s3_id` FOREIGN KEY `fk_snapshots__s3_id` (`s3_id`) REFERENCES `s3` (`id`);
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/tools/apidoc/gen_toc.py
----------------------------------------------------------------------
diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py
index eeaf2a2..abff8d1 100644
--- a/tools/apidoc/gen_toc.py
+++ b/tools/apidoc/gen_toc.py
@@ -116,6 +116,7 @@ known_categories = {
     'LB': 'Load Balancer',
     'ldap': 'LDAP',
     'Swift': 'Swift',
+    'S3' : 'S3',
     'SecondaryStorage': 'Host',
     'Project': 'Project',
     'Lun': 'Storage',

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/tools/marvin/marvin/cloudstackConnection.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/cloudstackConnection.py b/tools/marvin/marvin/cloudstackConnection.py
index c805213..8c4e325 100644
--- a/tools/marvin/marvin/cloudstackConnection.py
+++ b/tools/marvin/marvin/cloudstackConnection.py
@@ -42,7 +42,7 @@ class cloudConnection(object):
         else:
             self.protocol=protocol
         self.path = path
-        if port == 8096:
+        if port == 8096 or (self.apiKey == None and self.securityKey == None):
             self.auth = False
         else:
             self.auth = True

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/tools/marvin/marvin/deployDataCenter.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py
index bdf08cc..01235fe 100644
--- a/tools/marvin/marvin/deployDataCenter.py
+++ b/tools/marvin/marvin/deployDataCenter.py
@@ -399,9 +399,9 @@ class deployDataCenters():
                                              logging=self.testClientLogger)
 
         """config database"""
-        dbSvr = self.config.dbSvr
-        self.testClient.dbConfigure(dbSvr.dbSvr, dbSvr.port, dbSvr.user, \
-                                    dbSvr.passwd, dbSvr.db)
+        #dbSvr = self.config.dbSvr
+        #self.testClient.dbConfigure(dbSvr.dbSvr, dbSvr.port, dbSvr.user, \
+        #                            dbSvr.passwd, dbSvr.db)
         self.apiClient = self.testClient.getApiClient()
 
     def updateConfiguration(self, globalCfg):

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/ui/dictionary.jsp
----------------------------------------------------------------------
diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp
index e72481e..20b35bd 100644
--- a/ui/dictionary.jsp
+++ b/ui/dictionary.jsp
@@ -25,6 +25,17 @@ under the License.
 <% long now = System.currentTimeMillis(); %>
 <script language="javascript">
 dictionary = {
+'label.enable.s3': '<fmt:message key="label.enable.s3"/>',
+'confirm.enable.s3': '<fmt:message key="confirm.enable.s3"/>',
+'message.after.enable.s3': '<fmt:message key="message.after.enable.s3"/>',
+'label.s3.access_key': '<fmt:message key="label.s3.access_key"/>',
+'label.s3.secret_key': '<fmt:message key="label.s3.secret_key"/>',
+'label.s3.bucket': '<fmt:message key="label.s3.bucket"/>',
+'label.s3.endpoint': '<fmt:message key="label.s3.endpoint"/>',
+'label.s3.use_https': '<fmt:message key="label.s3.use_https"/>',
+'label.s3.connection_timeout': '<fmt:message key="label.s3.connection_timeout"/>',
+'label.s3.max_error_retry': '<fmt:message key="label.s3.max_error_retry"/>',
+'label.s3.socket_timeout': '<fmt:message key="label.s3.socket_timeout"/>',
 'label.egress.rules': '<fmt:message key="label.egress.rules"/>',
 'message.acquire.new.ip.vpc': '<fmt:message key="message.acquire.new.ip.vpc"/>',
 'label.quickview': '<fmt:message key="label.quickview"/>',

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/ui/scripts/cloudStack.js
----------------------------------------------------------------------
diff --git a/ui/scripts/cloudStack.js b/ui/scripts/cloudStack.js
index de3bd73..8fda244 100644
--- a/ui/scripts/cloudStack.js
+++ b/ui/scripts/cloudStack.js
@@ -178,8 +178,22 @@
                 havingSwift = true;
             }
           });
+          if (havingSwift == false) {
+            $.ajax({
+              url: createURL("listS3s"),
+              dataType: "json",
+              async: false,
+              success: function(json) {
+                var items = json.lists3sresponse.s3;
+                if (items != null && items.length > 0) {
+                  havingS3 = true;
+                }
+              }
+            });
+          }
         } else {
           havingSwift = false;
+          havingS3 = false;
         }
 
         return userValid ? {
@@ -296,8 +310,22 @@
                     havingSwift = true;
                 }
               });
+              if (havingSwift == false) {
+                $.ajax({
+                  url: createURL("listS3s"),
+                  dataType: "json",
+                  async: false,
+                  success: function(json) {
+                    var items = json.lists3sresponse.s3;
+                    if (items != null && items.length > 0) {
+                      havingS3 = true;
+                    }
+                  }
+                });
+              }
             } else {
               havingSwift = false;
+              havingS3 = false;
             }
 
             // Get project configuration

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/ui/scripts/sharedFunctions.js
----------------------------------------------------------------------
diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js
index f1b7efa..961c973 100644
--- a/ui/scripts/sharedFunctions.js
+++ b/ui/scripts/sharedFunctions.js
@@ -45,6 +45,7 @@ var pageSize = 20;
 
 var rootAccountId = 1;
 var havingSwift = false;
+var havingS3 = false;
 
 //async action
 var pollAsyncJobResult = function(args) {

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/ui/scripts/system.js
----------------------------------------------------------------------
diff --git a/ui/scripts/system.js b/ui/scripts/system.js
index 589739b..7c8f360 100644
--- a/ui/scripts/system.js
+++ b/ui/scripts/system.js
@@ -4249,6 +4249,84 @@
                       }
                     });
                   }
+                },
+
+              enableS3: {
+                label: 'label.enable.s3',
+                isHeader: true,
+                addRow: false,
+
+                preFilter: function(args) {
+                  var s3Enabled = false;
+                  $.ajax({
+                    url: createURL('listConfigurations'),
+                    data: {
+                      name: 's3.enable'
+                    },
+                    async: false,
+                    success: function(json) {
+                      s3Enabled = json.listconfigurationsresponse.configuration[0].value == 'true' && !havingS3 ?
+                      true : false;
+                    },
+                    error: function(json) {
+                      cloudStack.dialog.notice({ message: parseXMLHttpResponse(json) });
+                    }
+                 });
+
+                 return s3Enabled;
+              },
+
+              messages: {
+                notification: function(args) {
+                  return 'label.enable.s3';
+                }
+              },
+
+              createForm: {
+                desc: 'confirm.enable.s3',
+                fields: {
+                  accesskey: { label: 'label.s3.access_key', validation: { required: true } },
+                  secretkey: { label: 'label.s3.secret_key', validation: { required: true} },
+                  bucket: { label: 'label.s3.bucket', validation: { required: true} },
+                  endpoint: { label: 'label.s3.endpoint' },
+                  usehttps: { 
+                    label: 'label.s3.use_https', 
+                    isEditable: true,
+                    isBoolean: true,
+                    isChecked: true,
+                    converter:cloudStack.converters.toBooleanText 
+                  },
+                  connectiontimeout: { label: 'label.s3.connection_timeout' },
+                  maxerrorretry: { label: 'label.s3.max_error_retry' },
+                  sockettimeout: { label: 'label.s3.socket_timeout' }
+                }
+              },
+              action: function(args) {
+                $.ajax({
+                  url: createURL('addS3'),
+                  data: {
+                        accesskey: args.data.accesskey,
+                        secretkey: args.data.secretkey,
+                        bucket: args.data.bucket,
+                        endpoint: args.data.endpoint,
+                        usehttps: (args.data.usehttps != null && args.data.usehttps == 'on' ? 'true' : 'false'),
+                        connectiontimeout: args.data.connectiontimeout,
+                        maxerrorretry: args.data.maxerrorretry,
+                        sockettimeout: args.data.sockettimeout
+                      },
+                      success: function(json) {
+                        havingS3 = true;
+                        args.response.success();
+
+                        cloudStack.dialog.notice({
+                          message: 'message.after.enable.s3'
+                        });
+                      },
+                      error: function(json) {
+                        args.response.error(parseXMLHttpResponse(json));
+                      }
+                    });
+                  }
                 }
               },
 

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/ui/scripts/templates.js
----------------------------------------------------------------------
diff --git a/ui/scripts/templates.js b/ui/scripts/templates.js
index 3c49789..040ce4a 100644
--- a/ui/scripts/templates.js
+++ b/ui/scripts/templates.js
@@ -1420,7 +1420,7 @@
     else {
       allowedActions.push("edit");
 
-      if(havingSwift == false)
+      if(havingSwift == false && havingS3 == false)
         allowedActions.push("copyTemplate");
 
       //allowedActions.push("createVm"); // For Beta2, this simply doesn't work without a network.

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/utils/pom.xml
----------------------------------------------------------------------
diff --git a/utils/pom.xml b/utils/pom.xml
index 6bed67f..1ee8b7e 100644
--- a/utils/pom.xml
+++ b/utils/pom.xml
@@ -103,6 +103,11 @@
       <version>${cs.trilead.version}</version>
     </dependency>
     <dependency>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk</artifactId>
+      <version>${cs.aws.sdk.version}</version>
+    </dependency>
+    <dependency>
       <groupId>log4j</groupId>
       <artifactId>apache-log4j-extras</artifactId>
       <version>${cs.log4j.extras.version}</version>
@@ -128,7 +133,13 @@
       <groupId>mysql</groupId>
       <artifactId>mysql-connector-java</artifactId>
       <scope>test</scope>
-    </dependency>    
+    </dependency>
+    <dependency>
+        <groupId>commons-io</groupId>
+        <artifactId>commons-io</artifactId>
+        <version>${cs.commons-io.version}</version>
+        <scope>provided</scope>
+    </dependency>
   </dependencies>
   <build>
     <defaultGoal>install</defaultGoal>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/utils/src/com/cloud/utils/DateUtil.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/DateUtil.java b/utils/src/com/cloud/utils/DateUtil.java
index be1627d..1854e15 100644
--- a/utils/src/com/cloud/utils/DateUtil.java
+++ b/utils/src/com/cloud/utils/DateUtil.java
@@ -82,6 +82,10 @@ public class DateUtil {
         return formattedString;
     }
 
+    /** Returns the current wall-clock time as a {@link Date}. */
+    public static Date now() {
+        // new Date() is defined as new Date(System.currentTimeMillis()).
+        return new Date();
+    }
+
     public enum IntervalType {
         HOURLY,
     	DAILY,

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/utils/src/com/cloud/utils/S3Utils.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/S3Utils.java b/utils/src/com/cloud/utils/S3Utils.java
new file mode 100644
index 0000000..b7273a1
--- /dev/null
+++ b/utils/src/com/cloud/utils/S3Utils.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.utils;
+
+import static com.amazonaws.Protocol.HTTP;
+import static com.amazonaws.Protocol.HTTPS;
+import static com.cloud.utils.StringUtils.join;
+import static java.io.File.createTempFile;
+import static java.lang.String.format;
+import static java.lang.System.currentTimeMillis;
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.Collections.unmodifiableList;
+import static org.apache.commons.lang.ArrayUtils.isEmpty;
+import static org.apache.commons.lang.StringUtils.isBlank;
+import static org.apache.commons.lang.StringUtils.isNotBlank;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.log4j.Logger;
+
+import com.amazonaws.AmazonClientException;
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.BasicAWSCredentials;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.model.Bucket;
+import com.amazonaws.services.s3.model.GetObjectRequest;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.S3ObjectSummary;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+/**
+ * Static helpers for working with S3-compatible object stores: client
+ * construction, single-object and "directory" transfer, connectivity and
+ * read/write probes, and client-option validation.
+ *
+ * <p>Methods hold no state; each call acquires a fresh {@link AmazonS3}
+ * client from the supplied {@link ClientOptions}.</p>
+ */
+public final class S3Utils {
+
+    private static final Logger LOGGER = Logger.getLogger(S3Utils.class);
+
+    // Key separator used to emulate directories in S3's flat keyspace.
+    public static final String SEPARATOR = "/";
+
+    // S3 bucket names must be 3-63 characters long.
+    private static final int MIN_BUCKET_NAME_LENGTH = 3;
+    private static final int MAX_BUCKET_NAME_LENGTH = 63;
+
+    // Utility class: never instantiated.
+    private S3Utils() {
+        super();
+    }
+
+    /**
+     * Builds an {@link AmazonS3} client from the passed options.  Optional
+     * settings (protocol, timeouts, retries, endpoint) are applied only
+     * when non-null, leaving SDK defaults in place otherwise.
+     */
+    private static AmazonS3 acquireClient(final ClientOptions clientOptions) {
+
+        final AWSCredentials credentials = new BasicAWSCredentials(
+                clientOptions.getAccessKey(), clientOptions.getSecretKey());
+
+        final ClientConfiguration configuration = new ClientConfiguration();
+
+        // null means "use the SDK's default protocol".
+        if (clientOptions.isHttps() != null) {
+            configuration.setProtocol(clientOptions.isHttps() == true ? HTTPS
+                    : HTTP);
+        }
+
+        if (clientOptions.getConnectionTimeout() != null) {
+            configuration.setConnectionTimeout(clientOptions
+                    .getConnectionTimeout());
+        }
+
+        if (clientOptions.getMaxErrorRetry() != null) {
+            configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
+        }
+
+        if (clientOptions.getSocketTimeout() != null) {
+            configuration.setSocketTimeout(clientOptions.getSocketTimeout());
+        }
+
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(format(
+                    "Creating S3 client with configuration: [protocol: %1$s, connectionTimeOut: "
+                            + "%2$s, maxErrorRetry: %3$s, socketTimeout: %4$s]",
+                    configuration.getProtocol(),
+                    configuration.getConnectionTimeout(),
+                    configuration.getMaxErrorRetry(),
+                    configuration.getSocketTimeout()));
+        }
+
+        final AmazonS3Client client = new AmazonS3Client(credentials,
+                configuration);
+
+        // A blank endpoint keeps the SDK's default AWS endpoint; otherwise
+        // point the client at the configured S3-compatible service.
+        if (isNotBlank(clientOptions.getEndPoint())) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(format(
+                        "Setting the end point for S3 client %1$s to %2$s.",
+                        client, clientOptions.getEndPoint()));
+            }
+            client.setEndpoint(clientOptions.getEndPoint());
+        }
+
+        return client;
+
+    }
+
+    /**
+     * Uploads {@code sourceFile} to bucket {@code bucketName} under the
+     * object key {@code key}.
+     */
+    public static void putFile(final ClientOptions clientOptions,
+            final File sourceFile, final String bucketName, final String key) {
+
+        assert clientOptions != null;
+        assert sourceFile != null;
+        assert !isBlank(bucketName);
+        assert !isBlank(key);
+
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(format("Sending file %1$s as S3 object %2$s in "
+                    + "bucket %3$s", sourceFile.getName(), key, bucketName));
+        }
+
+        final AmazonS3 client = acquireClient(clientOptions);
+        client.putObject(bucketName, key, sourceFile);
+
+    }
+
+    /**
+     * Downloads object {@code key} from bucket {@code bucketName} into
+     * {@code targetDirectory}, naming the local file via
+     * {@code namingStrategy}.  The object is first written to a temporary
+     * file so a partial download never appears under the final name.
+     *
+     * @return the fully downloaded file
+     */
+    public static File getFile(final ClientOptions clientOptions,
+            final String bucketName, final String key,
+            final File targetDirectory, final FileNamingStrategy namingStrategy) {
+
+        assert clientOptions != null;
+        assert isNotBlank(bucketName);
+        assert isNotBlank(key);
+        assert targetDirectory != null && targetDirectory.isDirectory();
+        assert namingStrategy != null;
+
+        final AmazonS3 connection = acquireClient(clientOptions);
+
+        File tempFile = null;
+        try {
+
+            tempFile = createTempFile(
+                    join(asList(targetDirectory.getName(), currentTimeMillis(),
+                            "part"), "-"), "tmp", targetDirectory);
+            tempFile.deleteOnExit();
+
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(format(
+                        "Downloading object %1$s from bucket %2$s to temp file %3$s",
+                        key, bucketName, tempFile.getName()));
+            }
+
+            connection.getObject(new GetObjectRequest(bucketName, key),
+                    tempFile);
+
+            final File targetFile = new File(targetDirectory,
+                    namingStrategy.determineFileName(key));
+            // BUG FIX: the result of renameTo was previously ignored; on
+            // failure the temp file was deleted in the finally block and a
+            // File that does not exist was returned to the caller.
+            if (!tempFile.renameTo(targetFile)) {
+                throw new CloudRuntimeException(format(
+                        "Failed to rename temporary file %1$s to %2$s while downloading %3$s:%4$s from S3",
+                        tempFile.getAbsolutePath(),
+                        targetFile.getAbsolutePath(), bucketName, key));
+            }
+
+            return targetFile;
+
+        } catch (FileNotFoundException e) {
+
+            // BUG FIX: arguments were previously passed as (dir, bucket,
+            // key), swapping the object and bucket placeholders.
+            throw new CloudRuntimeException(
+                    format("Failed open file %1$s in order to get object %2$s from bucket %3$s.",
+                            targetDirectory.getAbsoluteFile(), key, bucketName),
+                    e);
+
+        } catch (IOException e) {
+
+            throw new CloudRuntimeException(
+                    format("Unable to allocate temporary file in directory %1$s to download %2$s:%3$s from S3",
+                            targetDirectory.getAbsolutePath(), bucketName, key),
+                    e);
+
+        } finally {
+
+            // Best-effort cleanup: after a successful rename the temp path
+            // no longer exists and delete() is a harmless no-op.
+            if (tempFile != null) {
+                tempFile.delete();
+            }
+
+        }
+
+    }
+
+    /**
+     * Downloads every object under {@code sourcePath} in bucket
+     * {@code bucketName} into {@code targetDirectory}, naming each local
+     * file via {@code namingStrategy}.
+     *
+     * @return the downloaded files as an unmodifiable list
+     */
+    public static List<File> getDirectory(final ClientOptions clientOptions,
+            final String bucketName, final String sourcePath,
+            final File targetDirectory, final FileNamingStrategy namingStrategy) {
+
+        assert clientOptions != null;
+        assert isNotBlank(bucketName);
+        assert isNotBlank(sourcePath);
+        assert targetDirectory != null;
+
+        final AmazonS3 connection = acquireClient(clientOptions);
+
+        // Enumerate the keys under the source "directory", then fetch each
+        // object individually.
+        final List<File> downloaded = new ArrayList<File>();
+        for (final S3ObjectSummary summary : listDirectory(bucketName,
+                sourcePath, connection)) {
+            downloaded.add(getFile(clientOptions, bucketName,
+                    summary.getKey(), targetDirectory, namingStrategy));
+        }
+
+        return unmodifiableList(downloaded);
+
+    }
+
+    /**
+     * Lists the object summaries whose keys fall under
+     * {@code directory + SEPARATOR} in {@code bucketName}; never null.
+     */
+    private static List<S3ObjectSummary> listDirectory(final String bucketName,
+            final String directory, final AmazonS3 client) {
+
+        final List<S3ObjectSummary> summaries = client.listObjects(bucketName,
+                directory + SEPARATOR).getObjectSummaries();
+
+        if (summaries == null) {
+            return emptyList();
+        }
+
+        return unmodifiableList(summaries);
+
+    }
+
+    /**
+     * Uploads the files directly inside {@code directory} that match
+     * {@code fileNameFilter} to {@code bucketName}, deriving each object
+     * key from {@code namingStrategy}.  Subdirectories are not recursed.
+     */
+    public static void putDirectory(final ClientOptions clientOptions,
+            final String bucketName, final File directory,
+            final FilenameFilter fileNameFilter,
+            final ObjectNamingStrategy namingStrategy) {
+
+        assert clientOptions != null;
+        assert isNotBlank(bucketName);
+        assert directory != null && directory.isDirectory();
+        assert fileNameFilter != null;
+        assert namingStrategy != null;
+
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(format("Putting directory %1$s in S3 bucket %2$s.",
+                    directory.getAbsolutePath(), bucketName));
+        }
+
+        // Determine the list of files to be sent using the passed filter ...
+        // NOTE(review): listFiles can return null on an I/O error; the
+        // isEmpty(files) check below treats that the same as "no files".
+        final File[] files = directory.listFiles(fileNameFilter);
+
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(format("Putting files (%1$s) in S3 bucket %2$s.",
+                    ArrayUtils.toString(files, "no files found"), bucketName));
+        }
+
+        // Skip spinning up an S3 connection when no files will be sent ...
+        if (isEmpty(files)) {
+            return;
+        }
+
+        final AmazonS3 client = acquireClient(clientOptions);
+
+        // Send the files to S3 using the passed ObjectNaming strategy to
+        // determine the key ...
+        for (final File file : files) {
+            final String key = namingStrategy.determineKey(file);
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(format(
+                        "Putting file %1$s into bucket %2$s with key %3$s.",
+                        file.getAbsolutePath(), bucketName, key));
+            }
+            client.putObject(bucketName, key, file);
+        }
+
+    }
+
+    /** Deletes the object stored under {@code key} in {@code bucketName}. */
+    public static void deleteObject(final ClientOptions clientOptions,
+            final String bucketName, final String key) {
+
+        assert clientOptions != null;
+        assert isNotBlank(bucketName);
+        assert isNotBlank(key);
+
+        acquireClient(clientOptions).deleteObject(bucketName, key);
+
+    }
+
+    /**
+     * Deletes every object whose key falls under {@code directoryName} in
+     * {@code bucketName}, then deletes the {@code directoryName} key
+     * itself.
+     */
+    public static void deleteDirectory(final ClientOptions clientOptions,
+            final String bucketName, final String directoryName) {
+
+        assert clientOptions != null;
+        assert isNotBlank(bucketName);
+        assert isNotBlank(directoryName);
+
+        final AmazonS3 client = acquireClient(clientOptions);
+
+        final List<S3ObjectSummary> objects = listDirectory(bucketName,
+                directoryName, client);
+
+        for (final S3ObjectSummary object : objects) {
+
+            client.deleteObject(bucketName, object.getKey());
+
+        }
+
+        // Also remove the bare directory key (no trailing separator), in
+        // case a placeholder object was created under that exact name.
+        client.deleteObject(bucketName, directoryName);
+
+    }
+
+    /**
+     * Verifies that the passed options can be used to reach the configured
+     * S3 endpoint.
+     *
+     * @return true when a round trip to the service succeeds
+     */
+    public static boolean canConnect(final ClientOptions clientOptions) {
+
+        try {
+
+            // BUG FIX: constructing an AmazonS3Client performs no network
+            // I/O, so the previous implementation always returned true.
+            // Issue a lightweight request to force an actual connection.
+            // NOTE(review): listBuckets requires permission to enumerate
+            // buckets — confirm this is acceptable for restricted keys.
+            acquireClient(clientOptions).listBuckets();
+            return true;
+
+        } catch (AmazonClientException e) {
+
+            LOGGER.warn("Ignored Exception while checking connection options",
+                    e);
+            return false;
+
+        }
+
+    }
+
+    /**
+     * Checks whether a bucket named {@code bucketName} is visible to the
+     * configured credentials.  Connectivity or authorization failures are
+     * logged and reported as "does not exist".
+     */
+    public static boolean doesBucketExist(final ClientOptions clientOptions,
+            final String bucketName) {
+
+        assert clientOptions != null;
+        assert !isBlank(bucketName);
+
+        try {
+
+            // Scan the account's buckets for a name match.
+            for (final Bucket bucket : acquireClient(clientOptions)
+                    .listBuckets()) {
+                if (bucket.getName().equals(bucketName)) {
+                    return true;
+                }
+            }
+
+            return false;
+
+        } catch (AmazonClientException e) {
+
+            LOGGER.warn("Ignored Exception while checking bucket existence", e);
+            return false;
+
+        }
+
+    }
+
+    /**
+     * Probes write access to {@code bucketName} by putting and then
+     * deleting a small, uniquely named object.
+     *
+     * @return true when both the put and the delete succeed
+     */
+    public static boolean canReadWriteBucket(final ClientOptions clientOptions,
+            final String bucketName) {
+
+        assert clientOptions != null;
+        assert isNotBlank(bucketName);
+
+        try {
+
+            final AmazonS3 client = acquireClient(clientOptions);
+
+            final String fileContent = "testing put and delete";
+            // BUG FIX: Content-Length must describe the encoded byte
+            // stream; derive both the stream and the length from the same
+            // byte[] instead of using the character count.
+            final byte[] payload = fileContent.getBytes();
+            final InputStream inputStream = new ByteArrayInputStream(payload);
+            final String key = UUID.randomUUID().toString() + ".txt";
+
+            final ObjectMetadata metadata = new ObjectMetadata();
+            metadata.setContentLength(payload.length);
+
+            client.putObject(bucketName, key, inputStream, metadata);
+            client.deleteObject(bucketName, key);
+
+            return true;
+
+        } catch (AmazonClientException e) {
+
+            // Previously swallowed silently; log for diagnosis, consistent
+            // with canConnect and doesBucketExist.
+            LOGGER.warn("Ignored Exception while checking bucket access", e);
+            return false;
+
+        }
+
+    }
+
+    public static List<String> checkClientOptions(ClientOptions clientOptions) {
+
+        assert clientOptions != null;
+
+        List<String> errorMessages = new ArrayList<String>();
+
+        errorMessages.addAll(checkRequiredField("access key",
+                clientOptions.getAccessKey()));
+        errorMessages.addAll(checkRequiredField("secret key",
+                clientOptions.getSecretKey()));
+
+        errorMessages.addAll(checkOptionalField("connection timeout",
+                clientOptions.getConnectionTimeout()));
+        errorMessages.addAll(checkOptionalField("socket timeout",
+                clientOptions.getSocketTimeout()));
+        errorMessages.addAll(checkOptionalField("max error retries",
+                clientOptions.getMaxErrorRetry()));
+
+        return unmodifiableList(errorMessages);
+
+    }
+
+    /**
+     * Validates {@code bucket} against the S3 bucket-name length rules
+     * (between {@code MIN_BUCKET_NAME_LENGTH} and
+     * {@code MAX_BUCKET_NAME_LENGTH} characters).  {@code bucketLabel}
+     * identifies the offending field in the messages.
+     *
+     * @return an unmodifiable list of error messages; empty when valid
+     */
+    public static List<String> checkBucketName(final String bucketLabel,
+            final String bucket) {
+
+        assert isNotBlank(bucketLabel);
+        assert isNotBlank(bucket);
+
+        final List<String> errorMessages = new ArrayList<String>();
+
+        // BUG FIX: the original messages were garbled ("The length of X for
+        // the Y must have a length of at least ..."); rephrased.
+        if (bucket.length() < MIN_BUCKET_NAME_LENGTH) {
+            errorMessages.add(format(
+                    "The value %1$s for the %2$s must be at least %3$s characters in length.",
+                    bucket, bucketLabel, MIN_BUCKET_NAME_LENGTH));
+        }
+
+        if (bucket.length() > MAX_BUCKET_NAME_LENGTH) {
+            errorMessages.add(format(
+                    "The value %1$s for the %2$s must be no more than %3$s characters in length.",
+                    bucket, bucketLabel, MAX_BUCKET_NAME_LENGTH));
+        }
+
+        return unmodifiableList(errorMessages);
+
+    }
+
+    /**
+     * Validates an optional numeric setting: null is acceptable, a present
+     * value must be zero or greater.
+     */
+    private static List<String> checkOptionalField(final String fieldName,
+            final Integer fieldValue) {
+        if (fieldValue != null && fieldValue < 0) {
+            // BUG FIX: the message claimed the value "must be greater than
+            // zero", but the check accepts zero; message now matches.
+            return singletonList(format("The value of %1$s must "
+                    + "not be less than zero.", fieldName));
+        }
+        return emptyList();
+    }
+
+    /**
+     * Validates a mandatory string setting; blank values produce a single
+     * error message.
+     */
+    private static List<String> checkRequiredField(final String fieldName,
+            final String fieldValue) {
+        if (isNotBlank(fieldValue)) {
+            return emptyList();
+        }
+        return singletonList(format("A %1$s must be specified.", fieldName));
+    }
+
+    /**
+     * Connection settings used to build an {@link AmazonS3} client.
+     * Optional values may be {@code null}, in which case the SDK defaults
+     * apply (see {@code acquireClient}).
+     */
+    public interface ClientOptions {
+
+        String getAccessKey();
+
+        String getSecretKey();
+
+        // May be blank; when set, overrides the default AWS endpoint.
+        String getEndPoint();
+
+        Boolean isHttps();
+
+        Integer getConnectionTimeout();
+
+        Integer getMaxErrorRetry();
+
+        Integer getSocketTimeout();
+
+    }
+
+    /** Maps a local file to the S3 object key it should be stored under. */
+    public interface ObjectNamingStrategy {
+
+        String determineKey(File file);
+
+    }
+
+    /** Maps an S3 object key to the local file name it should be saved as. */
+    public interface FileNamingStrategy {
+
+        String determineFileName(String key);
+
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/utils/src/com/cloud/utils/StringUtils.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/StringUtils.java b/utils/src/com/cloud/utils/StringUtils.java
index 0f0ef05..17df3e1 100644
--- a/utils/src/com/cloud/utils/StringUtils.java
+++ b/utils/src/com/cloud/utils/StringUtils.java
@@ -16,6 +16,8 @@
 // under the License.
 package com.cloud.utils;
 
+import static java.util.Arrays.*;
+
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -42,8 +44,12 @@ public class StringUtils {
         }
         return sb.toString();
     }
-    
-    
+
+    /**
+     * Joins the string forms of {@code components}, separated by
+     * {@code delimiter}, by delegating to the iterable-based join above.
+     * Note the argument order is reversed relative to that overload.
+     */
+    public static String join(final String delimiter,
+            final Object... components) {
+        return join(asList(components), delimiter);
+    }
+
 	/**
 	 * @param tags
 	 * @return List of tags
@@ -128,4 +134,5 @@ public class StringUtils {
     	
     	return sb.toString();
     }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/b70c1a5a/utils/src/com/cloud/utils/db/GlobalLock.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/db/GlobalLock.java b/utils/src/com/cloud/utils/db/GlobalLock.java
index 7c1c943..c956bbf 100644
--- a/utils/src/com/cloud/utils/db/GlobalLock.java
+++ b/utils/src/com/cloud/utils/db/GlobalLock.java
@@ -16,8 +16,11 @@
 // under the License.
 package com.cloud.utils.db;
 
+import static java.lang.String.format;
+
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.Callable;
 
 import org.apache.log4j.Logger;
 
@@ -201,4 +204,41 @@ public class GlobalLock {
 	public String getName() {
 		return name;
 	}
+
+    /**
+     * Runs {@code operation} while holding the named global lock.
+     *
+     * @return the operation's result, or {@code null} when the lock could
+     *         not be acquired within {@code lockAcquisitionTimeout}
+     * @throws Exception whatever {@code operation} throws
+     */
+    public static <T> T executeWithLock(final String operationId,
+            final int lockAcquisitionTimeout, final Callable<T> operation)
+            throws Exception {
+
+        final GlobalLock lock = GlobalLock.getInternLock(operationId);
+
+        // BUG FIX: release the lock only when it was actually acquired.
+        // Previously the finally block called unlock() even on the
+        // acquisition-failure path (and guarded a lock reference that can
+        // never be null at that point).
+        if (!lock.lock(lockAcquisitionTimeout)) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug(format(
+                        "Failed to acquire lock for operation id %1$s",
+                        operationId));
+            }
+            return null;
+        }
+
+        try {
+            return operation.call();
+        } finally {
+            lock.unlock();
+        }
+
+    }
+
+    /**
+     * Convenience wrapper around {@code executeWithLock} that does not wait
+     * for the lock: when it is already held, no work is performed and
+     * {@code null} is returned immediately.
+     */
+    public static <T> T executeWithNoWaitLock(final String operationId,
+            final Callable<T> operation) throws Exception {
+
+        return executeWithLock(operationId, 0, operation);
+
+    }
+
 }


Mime
View raw message