cloudstack-commits mailing list archives

From r...@apache.org
Subject [1/3] git commit: updated refs/heads/master to 7665bdc
Date Fri, 20 Nov 2015 21:28:06 GMT
Repository: cloudstack
Updated Branches:
  refs/heads/master b3b56e2cd -> 7665bdc81


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/framework/rest/pom.xml
----------------------------------------------------------------------
diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml
index 3b88e40..372770f 100644
--- a/framework/rest/pom.xml
+++ b/framework/rest/pom.xml
@@ -33,27 +33,27 @@
     <dependency>
       <groupId>com.fasterxml.jackson.module</groupId>
       <artifactId>jackson-module-jaxb-annotations</artifactId>
-      <version>2.4.4</version>
+      <version>2.6.3</version>
     </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-annotations</artifactId>
-      <version>2.4.4</version>
+      <version>2.6.3</version>
     </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-core</artifactId>
-      <version>2.4.4</version>
+      <version>2.6.3</version>
     </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
-      <version>2.4.4</version>
+      <version>2.6.3</version>
     </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.jaxrs</groupId>
       <artifactId>jackson-jaxrs-json-provider</artifactId>
-      <version>2.4.4</version>
+      <version>2.6.3</version>
     </dependency>
     <dependency>
       <groupId>org.apache.cxf</groupId>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/packaging/centos7/tomcat7/log4j-cloud.xml
----------------------------------------------------------------------
diff --git a/packaging/centos7/tomcat7/log4j-cloud.xml b/packaging/centos7/tomcat7/log4j-cloud.xml
index d03775c..1ebcbf8 100644
--- a/packaging/centos7/tomcat7/log4j-cloud.xml
+++ b/packaging/centos7/tomcat7/log4j-cloud.xml
@@ -162,6 +162,16 @@ under the License.
       <appender-ref ref="APISERVER"/>
    </logger>
 
+   <!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
+   <category name="com.amazonaws">
+      <priority value="INFO"/>
+   </category>
+
+   <!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
+   <category name="httpclient.wire">
+      <priority value="INFO"/>
+   </category>
+
    <!-- ============================== -->
    <!-- Add or remove these logger for SNMP, this logger is for SNMP alerts plugin -->
    <!-- ============================== -->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 08ce05a..ee7448d 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -18,8 +18,7 @@
  */
 package com.cloud.hypervisor.kvm.storage;
 
-import static com.cloud.utils.S3Utils.mputFile;
-import static com.cloud.utils.S3Utils.putFile;
+import static com.cloud.utils.storage.S3.S3Utils.putFile;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -95,7 +94,7 @@ import com.cloud.storage.template.Processor.FormatInfo;
 import com.cloud.storage.template.QCOW2Processor;
 import com.cloud.storage.template.TemplateLocation;
 import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.S3Utils;
+import com.cloud.utils.storage.S3.S3Utils;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.script.Script;
 
@@ -594,15 +593,10 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException {
-        final String bucket = destStore.getBucketName();
-
-        final long srcSize = srcFile.length();
         final String key = destPath + S3Utils.SEPARATOR + srcFile.getName();
-        if (!destStore.getSingleUpload(srcSize)) {
-            mputFile(destStore, srcFile, bucket, key);
-        } else {
-            putFile(destStore, srcFile, bucket, key);
-        }
+
+        putFile(destStore, srcFile, destStore.getBucketName(), key).waitForCompletion();
+
         return key;
     }
 
@@ -668,7 +662,7 @@ public class KVMStorageProcessor implements StorageProcessor {
         final SnapshotObjectTO snapshotOnCacheStore = (SnapshotObjectTO)answer.getNewData();
         snapshotOnCacheStore.setDataStore(cacheStore);
         ((SnapshotObjectTO)destData).setDataStore(imageStore);
-        final CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence());
+        final CopyCommand newCpyCmd = new   CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence());
         return copyToObjectStore(newCpyCmd);
     }
 

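The copyToS3() change above replaces the size-based choice between putFile() and mputFile() with a single putFile(...).waitForCompletion() call, so the relocated helper evidently returns an asynchronous transfer handle. A minimal sketch of that shape, assuming it delegates to the AWS SDK TransferManager (which selects single-part or multi-part upload by itself); the class and parameter names below are illustrative only, not the committed code:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;

// Hypothetical upload helper mirroring the new call sites.
public final class S3UploadSketch {

    // TransferManager picks single-part or multi-part upload on its own, which is
    // why the old srcSize check and the separate mputFile() disappear from callers.
    public static Upload putFile(final AmazonS3 client, final File sourceFile,
            final String bucketName, final String key) {
        final TransferManager transferManager = new TransferManager(client);
        // The returned Upload is asynchronous; callers block with waitForCompletion(),
        // as KVMStorageProcessor.copyToS3() now does.
        return transferManager.upload(bucketName, key, sourceFile);
    }
}
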
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
index 94cf5df..38b45d0 100644
--- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
@@ -70,11 +70,11 @@ import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.resource.StorageProcessor;
-import com.cloud.utils.S3Utils;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.storage.encoding.DecodedDataObject;
 import com.cloud.utils.storage.encoding.DecodedDataStore;
 import com.cloud.utils.storage.encoding.Decoder;
+import com.cloud.utils.storage.S3.ClientOptions;
 import com.xensource.xenapi.Connection;
 import com.xensource.xenapi.Host;
 import com.xensource.xenapi.PBD;
@@ -1061,7 +1061,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
 
         try {
 
-            final List<String> parameters = newArrayList(flattenProperties(s3, S3Utils.ClientOptions.class));
+            final List<String> parameters = newArrayList(flattenProperties(s3, ClientOptions.class));
             // https workaround for Introspector bug that does not
             // recognize Boolean accessor methods ...
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/plugins/storage/image/s3/pom.xml
----------------------------------------------------------------------
diff --git a/plugins/storage/image/s3/pom.xml b/plugins/storage/image/s3/pom.xml
index bb12cf7..6fb4d98 100644
--- a/plugins/storage/image/s3/pom.xml
+++ b/plugins/storage/image/s3/pom.xml
@@ -19,7 +19,7 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>cloud-plugin-storage-image-s3</artifactId>
-  <name>Apache CloudStack Plugin - Storage Image S3</name>
+  <name>Apache CloudStack Plugin - Storage Image S3 provider</name>
   <parent>
     <groupId>org.apache.cloudstack</groupId>
     <artifactId>cloudstack-plugins</artifactId>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java
index 2d723f4..3c2bc95 100644
--- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java
+++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java
@@ -39,7 +39,7 @@ import com.cloud.agent.api.to.S3TO;
 import com.cloud.configuration.Config;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.S3Utils;
+import com.cloud.utils.storage.S3.S3Utils;
 
 public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
     private static final Logger s_logger = Logger.getLogger(S3ImageStoreDriverImpl.class);
@@ -58,7 +58,9 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
                         imgStore.getUuid(),
                         details.get(ApiConstants.S3_ACCESS_KEY),
                         details.get(ApiConstants.S3_SECRET_KEY),
-                        details.get(ApiConstants.S3_END_POINT), details.get(ApiConstants.S3_BUCKET_NAME),
+                        details.get(ApiConstants.S3_END_POINT),
+                        details.get(ApiConstants.S3_BUCKET_NAME),
+                        details.get(ApiConstants.S3_SIGNER),
                         details.get(ApiConstants.S3_HTTPS_FLAG) == null ? false : Boolean.parseBoolean(details.get(ApiConstants.S3_HTTPS_FLAG)),
                         details.get(ApiConstants.S3_CONNECTION_TIMEOUT) == null ? null : Integer.valueOf(details.get(ApiConstants.S3_CONNECTION_TIMEOUT)),
                         details.get(ApiConstants.S3_MAX_ERROR_RETRY) == null ? null : Integer.valueOf(details.get(ApiConstants.S3_MAX_ERROR_RETRY)),
@@ -74,27 +76,29 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
         try {
             return Long.parseLong(_configDao.getValue(Config.S3MaxSingleUploadSize.toString())) * 1024L * 1024L * 1024L;
         } catch (NumberFormatException e) {
-            // use default 5GB
-            return 5L * 1024L * 1024L * 1024L;
+            // use default 1TB
+            return 1024L * 1024L * 1024L * 1024L;
         }
     }
 
     @Override
-    public String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject) {
-        // for S3, no need to do anything, just return template url for
-        // extract template. but we need to set object acl as public_read to
-        // make the url accessible
+    public String createEntityExtractUrl(DataStore store, String key, ImageFormat format, DataObject dataObject) {
+        /**
+         * Generate a pre-signed URL for the given object.
+         */
         S3TO s3 = (S3TO)getStoreTO(store);
-        String key = installPath;
 
-        s_logger.info("Generating pre-signed s3 entity extraction URL.");
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Generating pre-signed s3 entity extraction URL for object: " + key);
+        }
         Date expiration = new Date();
         long milliSeconds = expiration.getTime();
 
-        // get extract url expiration interval set in global configuration (in seconds)
+        // Get extract url expiration interval set in global configuration (in seconds)
         String urlExpirationInterval = _configDao.getValue(Config.ExtractURLExpirationInterval.toString());
-        int expirationInterval = NumbersUtil.parseInt(urlExpirationInterval, 14400);
-        milliSeconds += 1000 * expirationInterval; // expired after configured interval (in milliseconds)
+
+        // Expired after configured interval (in milliseconds), default 14400 seconds
+        milliSeconds += 1000 * NumbersUtil.parseInt(urlExpirationInterval, 14400);
         expiration.setTime(milliSeconds);
 
         URL s3url = S3Utils.generatePresignedUrl(s3, s3.getBucketName(), key, expiration);
@@ -103,5 +107,4 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
 
         return s3url.toString();
     }
-
 }

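For reference, the pre-signed extraction URL handed back by createEntityExtractUrl() comes from a single AWS SDK call; the generatePresignedUrl() helper in the old com.cloud.utils.S3Utils (deleted at the end of this patch) reduces to the following, and the relocated class is assumed to do the same. Names here are illustrative only:

import java.net.URL;
import java.util.Date;

import com.amazonaws.HttpMethod;
import com.amazonaws.services.s3.AmazonS3;

// Illustrative only: a GET pre-signed URL that stops working once 'expiration' passes.
public final class PresignedUrlSketch {

    public static URL generatePresignedUrl(final AmazonS3 client, final String bucketName,
            final String key, final Date expiration) {
        return client.generatePresignedUrl(bucketName, key, expiration, HttpMethod.GET);
    }
}
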
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
index 718c591..062fb70 100644
--- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
+++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
@@ -71,7 +71,6 @@ public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
     @Override
     public DataStore initialize(Map<String, Object> dsInfos) {
 
-        Long dcId = (Long)dsInfos.get("zoneId");
         String url = (String)dsInfos.get("url");
         String name = (String)dsInfos.get("name");
         String providerName = (String)dsInfos.get("providerName");
@@ -79,11 +78,10 @@ public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
         DataStoreRole role = (DataStoreRole)dsInfos.get("role");
         Map<String, String> details = (Map<String, String>)dsInfos.get("details");
 
-        s_logger.info("Trying to add a S3 store in data center " + dcId);
+        s_logger.info("Trying to add an S3 store with endpoint: " + details.get(ApiConstants.S3_END_POINT));
 
-        Map<String, Object> imageStoreParameters = new HashMap<String, Object>();
+        Map<String, Object> imageStoreParameters = new HashMap<>();
         imageStoreParameters.put("name", name);
-        imageStoreParameters.put("zoneId", dcId);
         imageStoreParameters.put("url", url);
         String protocol = "http";
         String useHttps = details.get(ApiConstants.S3_HTTPS_FLAG);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/provider/S3ImageStoreProviderImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/provider/S3ImageStoreProviderImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/provider/S3ImageStoreProviderImpl.java
index fff55a0..1894794 100644
--- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/provider/S3ImageStoreProviderImpl.java
+++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/provider/S3ImageStoreProviderImpl.java
@@ -44,14 +44,15 @@ import com.cloud.utils.component.ComponentContext;
 @Component
 public class S3ImageStoreProviderImpl implements ImageStoreProvider {
 
-    private final String providerName = DataStoreProvider.S3_IMAGE;
-    protected ImageStoreLifeCycle lifeCycle;
-    protected ImageStoreDriver driver;
     @Inject
     ImageStoreProviderManager storeMgr;
     @Inject
     ImageStoreHelper helper;
 
+    private final String providerName = DataStoreProvider.S3_IMAGE;
+    protected ImageStoreLifeCycle lifeCycle;
+    protected ImageStoreDriver driver;
+
     @Override
     public DataStoreLifeCycle getDataStoreLifeCycle() {
         return lifeCycle;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index d9a131c..9fce1fc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -96,7 +96,8 @@
     <org.springframework.version>3.2.12.RELEASE</org.springframework.version>
     <cs.mockito.version>1.9.5</cs.mockito.version>
     <cs.powermock.version>1.5.3</cs.powermock.version>
-    <cs.aws.sdk.version>1.3.22</cs.aws.sdk.version>
+    <cs.aws.sdk.version>1.10.34</cs.aws.sdk.version>
+    <cs.jackson.version>2.6.3</cs.jackson.version>
     <cs.lang.version>2.6</cs.lang.version>
     <cs.lang3.version>3.4</cs.lang3.version>
     <cs.commons-io.version>2.4</cs.commons-io.version>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/scripts/vm/hypervisor/xenserver/s3xenserver
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/s3xenserver b/scripts/vm/hypervisor/xenserver/s3xenserver
index d0cea6c..7a05e05 100644
--- a/scripts/vm/hypervisor/xenserver/s3xenserver
+++ b/scripts/vm/hypervisor/xenserver/s3xenserver
@@ -377,7 +377,7 @@ class S3Client(object):
 def parseArguments(args):
 
     # The keys in the args map will correspond to the properties defined on
-    # the com.cloud.utils.S3Utils#ClientOptions interface
+    # the com.cloud.utils.storage.S3.S3Utils#ClientOptions interface
     client = S3Client(
         args['accessKey'], args['secretKey'], args['endPoint'],
         args['https'], args['connectionTimeout'], args['socketTimeout'])

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/server/conf/log4j-cloud.xml.in
----------------------------------------------------------------------
diff --git a/server/conf/log4j-cloud.xml.in b/server/conf/log4j-cloud.xml.in
index 3b4bff1..9dc81e5 100755
--- a/server/conf/log4j-cloud.xml.in
+++ b/server/conf/log4j-cloud.xml.in
@@ -109,6 +109,16 @@ under the License.
       <appender-ref ref="APISERVER"/>
    </logger>
 
+   <!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
+   <category name="com.amazonaws">
+      <priority value="INFO"/>
+   </category>
+
+   <!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
+   <category name="httpclient.wire">
+      <priority value="INFO"/>
+   </category>
+
    <!-- ======================= -->
    <!-- Setup the Root category -->
    <!-- ======================= -->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/server/src/com/cloud/server/ManagementServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java
index 25071a2..a4e14f1 100644
--- a/server/src/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/com/cloud/server/ManagementServerImpl.java
@@ -162,7 +162,7 @@ import org.apache.cloudstack.api.command.admin.router.StopRouterCmd;
 import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd;
 import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd;
 import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd;
-import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd;
+import org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD;
 import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
@@ -171,7 +171,6 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.FindStoragePoolsForMigrationCmd;
 import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
-import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd;
 import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
 import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
 import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd;
@@ -2640,12 +2639,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(StartRouterCmd.class);
         cmdList.add(StopRouterCmd.class);
         cmdList.add(UpgradeRouterCmd.class);
-        cmdList.add(AddS3Cmd.class);
         cmdList.add(AddSwiftCmd.class);
         cmdList.add(CancelPrimaryStorageMaintenanceCmd.class);
         cmdList.add(CreateStoragePoolCmd.class);
         cmdList.add(DeletePoolCmd.class);
-        cmdList.add(ListS3sCmd.class);
         cmdList.add(ListSwiftsCmd.class);
         cmdList.add(ListStoragePoolsCmd.class);
         cmdList.add(ListStorageTagsCmd.class);
@@ -2911,6 +2908,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(RemoveFromGlobalLoadBalancerRuleCmd.class);
         cmdList.add(ListStorageProvidersCmd.class);
         cmdList.add(AddImageStoreCmd.class);
+        cmdList.add(AddImageStoreS3CMD.class);
         cmdList.add(ListImageStoresCmd.class);
         cmdList.add(DeleteImageStoreCmd.class);
         cmdList.add(CreateSecondaryStagingStoreCmd.class);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index ba39e1f..41f0019 100644
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -1844,7 +1844,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     }
 
     @Override
-    public ImageStore discoverImageStore(String name, String url, String providerName, Long dcId, Map details) throws IllegalArgumentException, DiscoveryException,
+    public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException,
     InvalidParameterValueException {
         DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
 
@@ -1857,13 +1857,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         }
 
         ScopeType scopeType = ScopeType.ZONE;
-        if (dcId == null) {
+        if (zoneId == null) {
             scopeType = ScopeType.REGION;
         }
 
         if (name == null) {
             name = url;
         }
+
         ImageStoreVO imageStore = _imageStoreDao.findByName(name);
         if (imageStore != null) {
             throw new InvalidParameterValueException("The image store with name " + name + " already exists, try creating with another name");
@@ -1884,11 +1885,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             }
         }
 
-        if (dcId != null) {
+        if (zoneId != null) {
             // Check if the zone exists in the system
-            DataCenterVO zone = _dcDao.findById(dcId);
+            DataCenterVO zone = _dcDao.findById(zoneId);
             if (zone == null) {
-                throw new InvalidParameterValueException("Can't find zone by id " + dcId);
+                throw new InvalidParameterValueException("Can't find zone by id " + zoneId);
             }
 
             Account account = CallContext.current().getCallingAccount();
@@ -1901,8 +1902,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             }
         }
 
-        Map<String, Object> params = new HashMap<String, Object>();
-        params.put("zoneId", dcId);
+        Map<String, Object> params = new HashMap<>();
+        params.put("zoneId", zoneId);
         params.put("url", url);
         params.put("name", name);
         params.put("details", details);
@@ -1911,11 +1912,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         params.put("role", DataStoreRole.Image);
 
         DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
+
         DataStore store;
         try {
             store = lifeCycle.initialize(params);
         } catch (Exception e) {
-            s_logger.debug("Failed to add data store: " + e.getMessage(), e);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Failed to add data store: " + e.getMessage(), e);
+            }
             throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e);
         }
 
@@ -1927,9 +1931,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             _imageSrv.addSystemVMTemplatesToSecondary(store);
         }
 
-        // associate builtin template with zones associated with this image
-        // store
-        associateCrosszoneTemplatesToZone(dcId);
+        // associate builtin template with zones associated with this image store
+        associateCrosszoneTemplatesToZone(zoneId);
 
         // duplicate cache store records to region wide storage
         if (scopeType == ScopeType.REGION) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
index f1937f8..65471dd 100644
--- a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
+++ b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
@@ -49,7 +49,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.storage.DownloadAnswer;
-import com.cloud.agent.api.storage.Proxy;
+import com.cloud.utils.net.Proxy;
 import com.cloud.configuration.Config;
 import com.cloud.storage.RegisterVolumePayload;
 import com.cloud.storage.Storage.ImageFormat;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
----------------------------------------------------------------------
diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
index be59691..b5c1576 100644
--- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
+++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
@@ -16,8 +16,7 @@
 // under the License.
 package org.apache.cloudstack.storage.resource;
 
-import static com.cloud.utils.S3Utils.mputFile;
-import static com.cloud.utils.S3Utils.putFile;
+import static com.cloud.utils.storage.S3.S3Utils.putFile;
 import static com.cloud.utils.StringUtils.join;
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
@@ -155,8 +154,7 @@ import com.cloud.storage.template.TemplateProp;
 import com.cloud.storage.template.VhdProcessor;
 import com.cloud.storage.template.VmdkProcessor;
 import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.S3Utils;
-import com.cloud.utils.S3Utils.FileNamingStrategy;
+import com.cloud.utils.storage.S3.S3Utils;
 import com.cloud.utils.SwiftUtil;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;
@@ -386,12 +384,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
                 }
             }
 
-            File destFile = S3Utils.getFile(s3, s3.getBucketName(), srcData.getPath(), downloadDirectory, new FileNamingStrategy() {
-                @Override
-                public String determineFileName(final String key) {
-                    return substringAfterLast(key, S3Utils.SEPARATOR);
-                }
-            });
+            File destFile = new File(downloadDirectory, substringAfterLast(srcData.getPath(), S3Utils.SEPARATOR));
+
+            S3Utils.getFile(s3, s3.getBucketName(), srcData.getPath(), destFile).waitForCompletion();
+
 
             if (destFile == null) {
                 return new CopyCmdAnswer("Can't find template");
@@ -400,7 +396,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
             return postProcessing(destFile, downloadPath, destPath, srcData, destData);
         } catch (Exception e) {
 
-            final String errMsg = format("Failed to download" + "due to $2%s", e.getMessage());
+            final String errMsg = format("Failed to download due to %1$s", e.getMessage());
             s_logger.error(errMsg, e);
             return new CopyCmdAnswer(errMsg);
         }
@@ -907,14 +903,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
                 }
             }
 
-            long srcSize = srcFile.length();
             ImageFormat format = getTemplateFormat(srcFile.getName());
             String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName();
-            if (!s3.getSingleUpload(srcSize)) {
-                mputFile(s3, srcFile, bucket, key);
-            } else {
-                putFile(s3, srcFile, bucket, key);
-            }
+
+            putFile(s3, srcFile, bucket, key).waitForCompletion();
 
             DataTO retObj = null;
             if (destData.getObjectType() == DataObjectType.TEMPLATE) {
@@ -1509,7 +1501,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
     Map<String, TemplateProp> s3ListTemplate(S3TO s3) {
         String bucket = s3.getBucketName();
         // List the objects in the source directory on S3
-        final List<S3ObjectSummary> objectSummaries = S3Utils.getDirectory(s3, bucket, TEMPLATE_ROOT_DIR);
+        final List<S3ObjectSummary> objectSummaries = S3Utils.listDirectory(s3, bucket, TEMPLATE_ROOT_DIR);
         if (objectSummaries == null) {
             return null;
         }
@@ -1530,7 +1522,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
     Map<Long, TemplateProp> s3ListVolume(S3TO s3) {
         String bucket = s3.getBucketName();
         // List the objects in the source directory on S3
-        final List<S3ObjectSummary> objectSummaries = S3Utils.getDirectory(s3, bucket, VOLUME_ROOT_DIR);
+        final List<S3ObjectSummary> objectSummaries = S3Utils.listDirectory(s3, bucket, VOLUME_ROOT_DIR);
         if (objectSummaries == null) {
             return null;
         }

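The download path changes the same way: the FileNamingStrategy callback is gone, the caller now builds the destination File itself, and S3Utils.getFile(...) returns a handle that is blocked on with waitForCompletion(). A sketch under the assumption that the relocated helper wraps TransferManager.download(); the committed method's signature may differ:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.Download;
import com.amazonaws.services.s3.transfer.TransferManager;

// Hypothetical download helper mirroring the new call site in
// NfsSecondaryStorageResource: fetches bucketName/key into destFile asynchronously.
public final class S3DownloadSketch {

    public static Download getFile(final AmazonS3 client, final String bucketName,
            final String key, final File destFile) {
        final TransferManager transferManager = new TransferManager(client);
        return transferManager.download(bucketName, key, destFile);
    }
}
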
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResource.java
----------------------------------------------------------------------
diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResource.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResource.java
index 93fd8ea..4d3f048 100644
--- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResource.java
+++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResource.java
@@ -19,11 +19,10 @@ package org.apache.cloudstack.storage.resource;
 import com.cloud.resource.ServerResource;
 
 /**
- *
  * SecondaryStorageServerResource is a generic container to execute commands sent
  */
 public interface SecondaryStorageResource extends ServerResource {
 
-    public String getRootDir(String cmd);
+    String getRootDir(String cmd);
 
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResourceHandler.java
----------------------------------------------------------------------
diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResourceHandler.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResourceHandler.java
index 14ebc71..5c97e48 100644
--- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResourceHandler.java
+++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageResourceHandler.java
@@ -20,5 +20,7 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.Command;
 
 public interface SecondaryStorageResourceHandler {
+
     Answer executeRequest(Command cmd);
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManager.java
----------------------------------------------------------------------
diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManager.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManager.java
index 0b6d47d..78190af 100644
--- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManager.java
+++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManager.java
@@ -24,7 +24,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
 import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
 
 import com.cloud.agent.api.storage.DownloadAnswer;
-import com.cloud.agent.api.storage.Proxy;
+import com.cloud.utils.net.Proxy;
 import com.cloud.agent.api.to.S3TO;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.VMTemplateHostVO;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
----------------------------------------------------------------------
diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
index 431a204..f1706e22 100644
--- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
+++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
@@ -49,7 +49,7 @@ import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
 import org.apache.log4j.Logger;
 
 import com.cloud.agent.api.storage.DownloadAnswer;
-import com.cloud.agent.api.storage.Proxy;
+import com.cloud.utils.net.Proxy;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.NfsTO;
 import com.cloud.agent.api.to.S3TO;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/systemvm/conf.dom0/log4j-cloud.xml.in
----------------------------------------------------------------------
diff --git a/systemvm/conf.dom0/log4j-cloud.xml.in b/systemvm/conf.dom0/log4j-cloud.xml.in
index dde844f..bc9f35e 100644
--- a/systemvm/conf.dom0/log4j-cloud.xml.in
+++ b/systemvm/conf.dom0/log4j-cloud.xml.in
@@ -88,6 +88,16 @@ under the License.
      <priority value="INFO"/>
    </category>
 
+   <!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
+   <category name="com.amazonaws">
+      <priority value="INFO"/>
+   </category>
+
+   <!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
+   <category name="httpclient.wire">
+      <priority value="INFO"/>
+   </category>
+
    <!-- ======================= -->
    <!-- Setup the Root category -->
    <!-- ======================= -->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/systemvm/conf/log4j-cloud.xml
----------------------------------------------------------------------
diff --git a/systemvm/conf/log4j-cloud.xml b/systemvm/conf/log4j-cloud.xml
index 2d1d361..9c26bf4 100644
--- a/systemvm/conf/log4j-cloud.xml
+++ b/systemvm/conf/log4j-cloud.xml
@@ -89,6 +89,16 @@ under the License.
      <priority value="INFO"/>
    </category>
 
+   <!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
+   <category name="com.amazonaws">
+      <priority value="INFO"/>
+   </category>
+
+   <!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
+   <category name="httpclient.wire">
+      <priority value="INFO"/>
+   </category>
+
    <!-- ======================= -->
    <!-- Setup the Root category -->
    <!-- ======================= -->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/pom.xml
----------------------------------------------------------------------
diff --git a/utils/pom.xml b/utils/pom.xml
index f52014a..5502c58 100755
--- a/utils/pom.xml
+++ b/utils/pom.xml
@@ -168,6 +168,11 @@
       <artifactId>guava-testlib</artifactId>
       <version>${cs.guava-testlib.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+      <version>${cs.jackson.version}</version>
+    </dependency>
   </dependencies>
   <build>
     <sourceDirectory>src/main/java</sourceDirectory>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/S3Utils.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/S3Utils.java b/utils/src/main/java/com/cloud/utils/S3Utils.java
deleted file mode 100644
index c07db33..0000000
--- a/utils/src/main/java/com/cloud/utils/S3Utils.java
+++ /dev/null
@@ -1,619 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-
-package com.cloud.utils;
-
-import static com.amazonaws.Protocol.HTTP;
-import static com.amazonaws.Protocol.HTTPS;
-import static com.cloud.utils.StringUtils.join;
-import static java.io.File.createTempFile;
-import static java.lang.String.format;
-import static java.lang.System.currentTimeMillis;
-import static java.util.Collections.emptyList;
-import static java.util.Collections.singletonList;
-import static java.util.Collections.unmodifiableList;
-import static org.apache.commons.lang.ArrayUtils.isEmpty;
-import static org.apache.commons.lang.StringUtils.isBlank;
-import static org.apache.commons.lang.StringUtils.isNotBlank;
-
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.log4j.Logger;
-
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.HttpMethod;
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3Client;
-import com.amazonaws.services.s3.model.Bucket;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.GetObjectRequest;
-import com.amazonaws.services.s3.model.ListObjectsRequest;
-import com.amazonaws.services.s3.model.ObjectListing;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectInputStream;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
-import com.amazonaws.services.s3.transfer.TransferManager;
-import com.amazonaws.services.s3.transfer.Upload;
-import com.cloud.utils.exception.CloudRuntimeException;
-
-public final class S3Utils {
-
-    private static final Logger LOGGER = Logger.getLogger(S3Utils.class);
-
-    public static final String SEPARATOR = "/";
-
-    private static final int MIN_BUCKET_NAME_LENGTH = 3;
-    private static final int MAX_BUCKET_NAME_LENGTH = 63;
-
-    private S3Utils() {
-        super();
-    }
-
-    public static AmazonS3 acquireClient(final ClientOptions clientOptions) {
-
-        final AWSCredentials credentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
-
-        final ClientConfiguration configuration = new ClientConfiguration();
-
-        if (clientOptions.isHttps() != null) {
-            configuration.setProtocol(clientOptions.isHttps() == true ? HTTPS : HTTP);
-        }
-
-        if (clientOptions.getConnectionTimeout() != null) {
-            configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
-        }
-
-        if (clientOptions.getMaxErrorRetry() != null) {
-            configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
-        }
-
-        if (clientOptions.getSocketTimeout() != null) {
-            configuration.setSocketTimeout(clientOptions.getSocketTimeout());
-        }
-
-        if (clientOptions.getUseTCPKeepAlive() != null) {
-            //configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
-            LOGGER.debug("useTCPKeepAlive not supported by old AWS SDK");
-        }
-
-        if (clientOptions.getConnectionTtl() != null) {
-            //configuration.setConnectionTTL(clientOptions.getConnectionTtl());
-            LOGGER.debug("connectionTtl not supported by old AWS SDK");
-        }
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, connectionTimeOut: " + "%2$s, maxErrorRetry: %3$s, socketTimeout: %4$s, useTCPKeepAlive: %5$s, connectionTtl: %6$s]",
-                configuration.getProtocol(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(), configuration.getSocketTimeout(),
-                -1, -1));
-        }
-
-        final AmazonS3Client client = new AmazonS3Client(credentials, configuration);
-
-        if (isNotBlank(clientOptions.getEndPoint())) {
-            if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug(format("Setting the end point for S3 client %1$s to %2$s.", client, clientOptions.getEndPoint()));
-            }
-            client.setEndpoint(clientOptions.getEndPoint());
-        }
-
-        return client;
-
-    }
-
-    public static void putFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) {
-
-        assert clientOptions != null;
-        assert sourceFile != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Sending file %1$s as S3 object %2$s in " + "bucket %3$s", sourceFile.getName(), key, bucketName));
-        }
-
-        acquireClient(clientOptions).putObject(bucketName, key, sourceFile);
-
-    }
-
-    public static void putObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key) {
-
-        assert clientOptions != null;
-        assert sourceStream != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Sending stream as S3 object %1$s in " + "bucket %2$s", key, bucketName));
-        }
-
-        acquireClient(clientOptions).putObject(bucketName, key, sourceStream, null);
-
-    }
-
-    public static void putObject(final ClientOptions clientOptions, final PutObjectRequest req) {
-
-        assert clientOptions != null;
-        assert req != null;
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Sending stream as S3 object using PutObjectRequest"));
-        }
-
-        acquireClient(clientOptions).putObject(req);
-
-    }
-
-    // multi-part upload file
-    public static void mputFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) throws InterruptedException {
-
-        assert clientOptions != null;
-        assert sourceFile != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Multipart sending file %1$s as S3 object %2$s in " + "bucket %3$s", sourceFile.getName(), key, bucketName));
-        }
-        TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions));
-        Upload upload = tm.upload(bucketName, key, sourceFile);
-        upload.waitForCompletion();
-    }
-
-    // multi-part upload object
-    public static void mputObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key)
-        throws InterruptedException {
-
-        assert clientOptions != null;
-        assert sourceStream != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Multipart sending stream as S3 object %1$s in " + "bucket %2$s", key, bucketName));
-        }
-        TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions));
-        Upload upload = tm.upload(bucketName, key, sourceStream, null);
-        upload.waitForCompletion();
-    }
-
-    // multi-part upload object
-    public static void mputObject(final ClientOptions clientOptions, final PutObjectRequest req) throws InterruptedException {
-
-        assert clientOptions != null;
-        assert req != null;
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("Multipart sending object to S3 using PutObjectRequest");
-        }
-        TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions));
-        Upload upload = tm.upload(req);
-        upload.waitForCompletion();
-
-    }
-
-    public static void setObjectAcl(final ClientOptions clientOptions, final String bucketName, final String key, final CannedAccessControlList acl) {
-
-        assert clientOptions != null;
-        assert acl != null;
-
-        acquireClient(clientOptions).setObjectAcl(bucketName, key, acl);
-
-    }
-
-    public static URL generatePresignedUrl(final ClientOptions clientOptions, final String bucketName, final String key, final Date expiration) {
-
-        assert clientOptions != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        return acquireClient(clientOptions).generatePresignedUrl(bucketName, key, expiration, HttpMethod.GET);
-
-    }
-
-    // Note that whenever S3Object is returned, client code needs to close the internal stream to avoid resource leak.
-    public static S3Object getObject(final ClientOptions clientOptions, final String bucketName, final String key) {
-
-        assert clientOptions != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Get S3 object %1$s in " + "bucket %2$s", key, bucketName));
-        }
-
-        return acquireClient(clientOptions).getObject(bucketName, key);
-
-    }
-
-    // Note that whenever S3Object is returned, client code needs to close the internal stream to avoid resource leak.
-    public static S3ObjectInputStream getObjectStream(final ClientOptions clientOptions, final String bucketName, final String key) {
-
-        assert clientOptions != null;
-        assert !isBlank(bucketName);
-        assert !isBlank(key);
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Get S3 object %1$s in " + "bucket %2$s", key, bucketName));
-        }
-
-        return acquireClient(clientOptions).getObject(bucketName, key).getObjectContent();
-
-    }
-
-    @SuppressWarnings("unchecked")
-    public static File getFile(final ClientOptions clientOptions, final String bucketName, final String key, final File targetDirectory,
-        final FileNamingStrategy namingStrategy) {
-
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-        assert isNotBlank(key);
-        assert targetDirectory != null && targetDirectory.isDirectory();
-        assert namingStrategy != null;
-
-        final AmazonS3 connection = acquireClient(clientOptions);
-
-        File tempFile = null;
-        try {
-
-            tempFile = createTempFile(join("-", targetDirectory.getName(), currentTimeMillis(), "part"), "tmp", targetDirectory);
-            tempFile.deleteOnExit();
-
-            if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug(format("Downloading object %1$s from bucket %2$s to temp file %3$s", key, bucketName, tempFile.getName()));
-            }
-
-            try {
-                connection.getObject(new GetObjectRequest(bucketName, key), tempFile);
-            } catch (AmazonClientException ex) {
-                // hack to handle different ETAG format generated from RiakCS for multi-part uploaded object
-                String msg = ex.getMessage();
-                if (!msg.contains("verify integrity")) {
-                    throw ex;
-                }
-            }
-
-            final File targetFile = new File(targetDirectory, namingStrategy.determineFileName(key));
-            tempFile.renameTo(targetFile);
-
-            return targetFile;
-
-        } catch (FileNotFoundException e) {
-
-            throw new CloudRuntimeException(format("Failed open file %1$s in order to get object %2$s from bucket %3$s.", targetDirectory.getAbsoluteFile(), bucketName,
-                key), e);
-
-        } catch (IOException e) {
-
-            throw new CloudRuntimeException(format("Unable to allocate temporary file in directory %1$s to download %2$s:%3$s from S3",
-                targetDirectory.getAbsolutePath(), bucketName, key), e);
-
-        } finally {
-
-            if (tempFile != null) {
-                tempFile.delete();
-            }
-
-        }
-
-    }
-
-    public static List<File> getDirectory(final ClientOptions clientOptions, final String bucketName, final String sourcePath, final File targetDirectory,
-        final FileNamingStrategy namingStrategy) {
-
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-        assert isNotBlank(sourcePath);
-        assert targetDirectory != null;
-
-        final AmazonS3 connection = acquireClient(clientOptions);
-
-        // List the objects in the source directory on S3
-        final List<S3ObjectSummary> objectSummaries = listDirectory(bucketName, sourcePath, connection);
-        final List<File> files = new ArrayList<File>();
-
-        for (final S3ObjectSummary objectSummary : objectSummaries) {
-
-            files.add(getFile(clientOptions, bucketName, objectSummary.getKey(), targetDirectory, namingStrategy));
-
-        }
-
-        return unmodifiableList(files);
-
-    }
-
-    public static List<S3ObjectSummary> getDirectory(final ClientOptions clientOptions, final String bucketName, final String sourcePath) {
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-        assert isNotBlank(sourcePath);
-
-        final AmazonS3 connection = acquireClient(clientOptions);
-
-        // List the objects in the source directory on S3
-        return listDirectory(bucketName, sourcePath, connection);
-    }
-
-    private static List<S3ObjectSummary> listDirectory(final String bucketName, final String directory, final AmazonS3 client) {
-
-        List<S3ObjectSummary> objects = new ArrayList<S3ObjectSummary>();
-        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(directory + SEPARATOR);
-
-        ObjectListing ol = client.listObjects(listObjectsRequest);
-        if(ol.isTruncated()) {
-            do {
-                objects.addAll(ol.getObjectSummaries());
-                listObjectsRequest.setMarker(ol.getNextMarker());
-                ol = client.listObjects(listObjectsRequest);
-            } while (ol.isTruncated());
-        }
-        else {
-            objects.addAll(ol.getObjectSummaries());
-        }
-
-        if (objects.isEmpty()) {
-            return emptyList();
-        }
-
-        return unmodifiableList(objects);
-    }
-
-    public static void putDirectory(final ClientOptions clientOptions, final String bucketName, final File directory, final FilenameFilter fileNameFilter,
-        final ObjectNamingStrategy namingStrategy) {
-
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-        assert directory != null && directory.isDirectory();
-        assert fileNameFilter != null;
-        assert namingStrategy != null;
-
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(format("Putting directory %1$s in S3 bucket %2$s.", directory.getAbsolutePath(), bucketName));
-        }
-
-        // Determine the list of files to be sent using the passed filter ...
-        final File[] files = directory.listFiles(fileNameFilter);
-
-        if (LOGGER.isTraceEnabled()) {
-            LOGGER.trace(format("Putting files (%1$s) in S3 bucket %2$s.", ArrayUtils.toString(files, "no files found"), bucketName));
-        }
-
-        // Skip spinning up an S3 connection when no files will be sent ...
-        if (isEmpty(files)) {
-            return;
-        }
-
-        final AmazonS3 client = acquireClient(clientOptions);
-
-        // Send the files to S3 using the passed ObjectNaming strategy to
-        // determine the key ...
-        for (final File file : files) {
-            final String key = namingStrategy.determineKey(file);
-            if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug(format("Putting file %1$s into bucket %2$s with key %3$s.", file.getAbsolutePath(), bucketName, key));
-            }
-            client.putObject(bucketName, key, file);
-        }
-
-    }
-
-    public static void deleteObject(final ClientOptions clientOptions, final String bucketName, final String key) {
-
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-        assert isNotBlank(key);
-
-        final AmazonS3 client = acquireClient(clientOptions);
-
-        client.deleteObject(bucketName, key);
-
-    }
-
-    public static void deleteDirectory(final ClientOptions clientOptions, final String bucketName, final String directoryName) {
-
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-        assert isNotBlank(directoryName);
-
-        final AmazonS3 client = acquireClient(clientOptions);
-
-        final List<S3ObjectSummary> objects = listDirectory(bucketName, directoryName, client);
-
-        for (final S3ObjectSummary object : objects) {
-
-            client.deleteObject(bucketName, object.getKey());
-
-        }
-
-        client.deleteObject(bucketName, directoryName);
-
-    }
-
-    public static boolean canConnect(final ClientOptions clientOptions) {
-
-        try {
-
-            acquireClient(clientOptions);
-            return true;
-
-        } catch (AmazonClientException e) {
-
-            LOGGER.warn("Ignored Exception while checking connection options", e);
-            return false;
-
-        }
-
-    }
-
-    public static boolean doesBucketExist(final ClientOptions clientOptions, final String bucketName) {
-
-        assert clientOptions != null;
-        assert !isBlank(bucketName);
-
-        try {
-
-            final List<Bucket> buckets = acquireClient(clientOptions).listBuckets();
-
-            for (Bucket bucket : buckets) {
-                if (bucket.getName().equals(bucketName)) {
-                    return true;
-                }
-            }
-
-            return false;
-
-        } catch (AmazonClientException e) {
-
-            LOGGER.warn("Ignored Exception while checking bucket existence", e);
-            return false;
-
-        }
-
-    }
-
-    public static boolean canReadWriteBucket(final ClientOptions clientOptions, final String bucketName) {
-
-        assert clientOptions != null;
-        assert isNotBlank(bucketName);
-
-        try {
-
-            final AmazonS3 client = acquireClient(clientOptions);
-
-            final String fileContent = "testing put and delete";
-            final InputStream inputStream = new ByteArrayInputStream(fileContent.getBytes());
-            final String key = UUID.randomUUID().toString() + ".txt";
-
-            final ObjectMetadata metadata = new ObjectMetadata();
-            metadata.setContentLength(fileContent.length());
-
-            client.putObject(bucketName, key, inputStream, metadata);
-            client.deleteObject(bucketName, key);
-
-            return true;
-
-        } catch (AmazonClientException e) {
-
-            return false;
-
-        }
-
-    }
-
-    public static List<String> checkClientOptions(ClientOptions clientOptions) {
-
-        assert clientOptions != null;
-
-        List<String> errorMessages = new ArrayList<String>();
-
-        errorMessages.addAll(checkRequiredField("access key", clientOptions.getAccessKey()));
-        errorMessages.addAll(checkRequiredField("secret key", clientOptions.getSecretKey()));
-
-        errorMessages.addAll(checkOptionalField("connection timeout", clientOptions.getConnectionTimeout()));
-        errorMessages.addAll(checkOptionalField("socket timeout", clientOptions.getSocketTimeout()));
-        errorMessages.addAll(checkOptionalField("max error retries", clientOptions.getMaxErrorRetry()));
-        errorMessages.addAll(checkOptionalField("connection ttl", clientOptions.getConnectionTtl()));
-
-        return unmodifiableList(errorMessages);
-
-    }
-
-    public static List<String> checkBucketName(final String bucketLabel, final String bucket) {
-
-        assert isNotBlank(bucketLabel);
-        assert isNotBlank(bucket);
-
-        final List<String> errorMessages = new ArrayList<String>();
-
-        if (bucket.length() < MIN_BUCKET_NAME_LENGTH) {
-            errorMessages.add(format("The %2$s %1$s must be at least %3$s characters long", bucket, bucketLabel,
-                MIN_BUCKET_NAME_LENGTH));
-        }
-
-        if (bucket.length() > MAX_BUCKET_NAME_LENGTH) {
-            errorMessages.add(format("The %2$s %1$s must not be longer than %3$s characters", bucket, bucketLabel,
-                MAX_BUCKET_NAME_LENGTH));
-        }
-
-        return unmodifiableList(errorMessages);
-
-    }
-
-    private static List<String> checkOptionalField(final String fieldName, final Integer fieldValue) {
-        if (fieldValue != null && fieldValue < 0) {
-            return singletonList(format("The value of %1$s must not be negative.", fieldName));
-        }
-        return emptyList();
-    }
-
-    private static List<String> checkRequiredField(String fieldName, String fieldValue) {
-        if (isBlank(fieldValue)) {
-            return singletonList(format("A %1$s must be specified.", fieldName));
-        }
-        return emptyList();
-    }
-
-    public interface ClientOptions {
-
-        String getAccessKey();
-
-        String getSecretKey();
-
-        String getEndPoint();
-
-        Boolean isHttps();
-
-        Integer getConnectionTimeout();
-
-        Integer getMaxErrorRetry();
-
-        Integer getSocketTimeout();
-
-        Boolean getUseTCPKeepAlive();
-
-        Integer getConnectionTtl();
-    }
-
-    public interface ObjectNamingStrategy {
-
-        String determineKey(File file);
-
-    }
-
-    public interface FileNamingStrategy {
-
-        String determineFileName(String key);
-
-    }
-
-}
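
For reference on the block removed above: the deleted validation helpers (checkClientOptions and checkBucketName) return lists of error messages that a caller would normally combine before attempting any S3 request. The sketch below only illustrates that calling pattern against the old com.cloud.utils.S3Utils; the wrapper class and the "backup bucket" label are invented for the example.

    import java.util.ArrayList;
    import java.util.List;

    import com.cloud.utils.S3Utils;

    public class S3ConfigValidationExample {
        // Hypothetical caller combining the removed validation helpers before any S3 call.
        static void validate(final S3Utils.ClientOptions options, final String bucketName) {
            final List<String> errors = new ArrayList<String>(S3Utils.checkClientOptions(options));
            errors.addAll(S3Utils.checkBucketName("backup bucket", bucketName));
            if (!errors.isEmpty()) {
                throw new IllegalArgumentException("Invalid S3 configuration: " + errors);
            }
        }
    }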

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/net/HTTPUtils.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/net/HTTPUtils.java b/utils/src/main/java/com/cloud/utils/net/HTTPUtils.java
new file mode 100644
index 0000000..95aee6e
--- /dev/null
+++ b/utils/src/main/java/com/cloud/utils/net/HTTPUtils.java
@@ -0,0 +1,143 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.utils.net;
+
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.HttpMethod;
+import org.apache.commons.httpclient.HttpMethodRetryHandler;
+import org.apache.commons.httpclient.HttpStatus;
+import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
+import org.apache.commons.httpclient.NoHttpResponseException;
+import org.apache.commons.httpclient.UsernamePasswordCredentials;
+import org.apache.commons.httpclient.auth.AuthScope;
+import org.apache.log4j.Logger;
+
+import java.io.IOException;
+
+public final class HTTPUtils {
+
+    private static final Logger LOGGER = Logger.getLogger(HTTPUtils.class);
+
+    // Shared connection manager so every HttpClient returned below reuses a pooled set of connections.
+    private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager();
+
+    private HTTPUtils() {}
+
+    public static HttpClient getHTTPClient() {
+        return new HttpClient(s_httpClientManager);
+    }
+
+    /**
+     * @return an HttpMethodRetryHandler that retries a request up to the given retryCount.
+     */
+    public static HttpMethodRetryHandler getHttpMethodRetryHandler(final int retryCount) {
+
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Initializing new HttpMethodRetryHandler with retry count " + retryCount);
+        }
+
+        return new HttpMethodRetryHandler() {
+            @Override
+            public boolean retryMethod(final HttpMethod method, final IOException exception, int executionCount) {
+                if (executionCount >= retryCount) {
+                    // Do not retry if over max retry count
+                    return false;
+                }
+                if (exception instanceof NoHttpResponseException) {
+                    // Retry if the server dropped connection on us
+                    return true;
+                }
+                if (!method.isRequestSent()) {
+                    // Retry if the request has not been fully sent, since
+                    // repeating it cannot cause duplicate side effects
+                    return true;
+                }
+                // otherwise do not retry
+                return false;
+            }
+        };
+    }
+
+    /**
+     * @param proxy the proxy to route requests through; ignored when null
+     * @param httpClient the client to configure; ignored when null
+     */
+    public static void setProxy(Proxy proxy, HttpClient httpClient) {
+        if (proxy != null && httpClient != null) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Setting proxy with host " + proxy.getHost() + " and port " + proxy.getPort() + " for host " + httpClient.getHostConfiguration().getHost() + ":" + httpClient.getHostConfiguration().getPort());
+            }
+
+            httpClient.getHostConfiguration().setProxy(proxy.getHost(), proxy.getPort());
+            if (proxy.getUserName() != null && proxy.getPassword() != null) {
+                httpClient.getState().setProxyCredentials(AuthScope.ANY, new UsernamePasswordCredentials(proxy.getUserName(), proxy.getPassword()));
+            }
+        }
+    }
+
+    /**
+     * @param username the user name for preemptive basic authentication
+     * @param password the password for preemptive basic authentication
+     * @param httpClient the client to configure; ignored when any argument is null
+     */
+    public static void setCredentials(String username, String password, HttpClient httpClient) {
+        if (username != null && password != null && httpClient != null) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Setting credentials with username " + username + " for host " + httpClient.getHostConfiguration().getHost() + ":" + httpClient.getHostConfiguration().getPort());
+            }
+
+            httpClient.getParams().setAuthenticationPreemptive(true);
+            httpClient.getState().setCredentials(
+                    new AuthScope(httpClient.getHostConfiguration().getHost(), httpClient.getHostConfiguration().getPort(), AuthScope.ANY_REALM), new UsernamePasswordCredentials(username, password));
+        }
+    }
+
+    /**
+     * @param httpClient the client used to execute the request
+     * @param httpMethod the request to execute
+     * @return
+     *          the HTTP status code, or -1 if an I/O exception occurred
+     */
+    public static int executeMethod(HttpClient httpClient, HttpMethod httpMethod) {
+        // Execute the method and translate any transport failure into a -1 status code.
+        try {
+            return httpClient.executeMethod(httpMethod);
+        } catch (IOException e) {
+            LOGGER.warn("Exception while executing HttpMethod " + httpMethod.getName() + " on URL " + httpMethod.getPath(), e);
+            return -1;
+        }
+    }
+
+    /**
+     * @param responseCode the HTTP status code to check
+     * @return true for 200, 301 and 302; false otherwise
+     */
+    public static boolean verifyResponseCode(int responseCode) {
+        switch (responseCode) {
+            case HttpStatus.SC_OK:
+            case HttpStatus.SC_MOVED_PERMANENTLY:
+            case HttpStatus.SC_MOVED_TEMPORARILY:
+                return true;
+            default:
+                return false;
+
+        }
+    }
+}
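
The new HTTPUtils class wraps a shared MultiThreadedHttpConnectionManager plus the usual retry, proxy and credential plumbing of Commons HttpClient 3.x. A minimal usage sketch follows; the URL and the retry count of 3 are illustrative assumptions, not values taken from this commit.

    import org.apache.commons.httpclient.HttpClient;
    import org.apache.commons.httpclient.methods.GetMethod;
    import org.apache.commons.httpclient.params.HttpMethodParams;

    import com.cloud.utils.net.HTTPUtils;

    public class HttpUtilsExample {
        public static void main(String[] args) {
            final HttpClient client = HTTPUtils.getHTTPClient();
            final GetMethod method = new GetMethod("http://example.com/template.qcow2"); // placeholder URL
            // Attach the retry handler produced by HTTPUtils (up to 3 attempts here).
            method.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, HTTPUtils.getHttpMethodRetryHandler(3));
            try {
                final int status = HTTPUtils.executeMethod(client, method);
                if (!HTTPUtils.verifyResponseCode(status)) {
                    System.err.println("Request failed with status " + status);
                }
            } finally {
                method.releaseConnection();
            }
        }
    }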

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/net/Proxy.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/net/Proxy.java b/utils/src/main/java/com/cloud/utils/net/Proxy.java
new file mode 100644
index 0000000..a4475c2
--- /dev/null
+++ b/utils/src/main/java/com/cloud/utils/net/Proxy.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.utils.net;
+
+import java.net.URI;
+
+/**
+ * HTTP proxy settings (host, port and optional credentials) used for downloads.
+ */
+public class Proxy {
+    private String _host;
+    private int _port;
+    private String _userName;
+    private String _password;
+
+    public Proxy(String host, int port, String userName, String password) {
+        this._host = host;
+        this._port = port;
+        this._userName = userName;
+        this._password = password;
+    }
+
+    public Proxy(URI uri) {
+        this._host = uri.getHost();
+        this._port = uri.getPort() == -1 ? 3128 : uri.getPort();
+        String userInfo = uri.getUserInfo();
+        if (userInfo != null) {
+            String[] tokens = userInfo.split(":");
+            if (tokens.length == 1) {
+                this._userName = userInfo;
+                this._password = "";
+            } else if (tokens.length == 2) {
+                this._userName = tokens[0];
+                this._password = tokens[1];
+            }
+        }
+    }
+
+    public String getHost() {
+        return _host;
+    }
+
+    public int getPort() {
+        return _port;
+    }
+
+    public String getUserName() {
+        return _userName;
+    }
+
+    public String getPassword() {
+        return _password;
+    }
+}
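
Proxy accepts either explicit values or a URI whose user-info segment carries the credentials and whose missing port falls back to 3128 (the common Squid default). A short sketch of the URI form, wired into HTTPUtils.setProxy(), is shown below; the proxy address and credentials are placeholders.

    import java.net.URI;

    import org.apache.commons.httpclient.HttpClient;

    import com.cloud.utils.net.HTTPUtils;
    import com.cloud.utils.net.Proxy;

    public class ProxyExample {
        public static void main(String[] args) {
            // "user:secret" becomes the proxy credentials; the absent port defaults to 3128.
            final Proxy proxy = new Proxy(URI.create("http://user:secret@proxy.example.org"));
            final HttpClient client = HTTPUtils.getHTTPClient();
            HTTPUtils.setProxy(proxy, client);
            System.out.println("Using proxy " + proxy.getHost() + ":" + proxy.getPort());
        }
    }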

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/storage/S3/ClientOptions.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/storage/S3/ClientOptions.java b/utils/src/main/java/com/cloud/utils/storage/S3/ClientOptions.java
new file mode 100644
index 0000000..9c9e0aa
--- /dev/null
+++ b/utils/src/main/java/com/cloud/utils/storage/S3/ClientOptions.java
@@ -0,0 +1,42 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.utils.storage.S3;
+
+public interface ClientOptions {
+    String getAccessKey();
+
+    String getSecretKey();
+
+    String getEndPoint();
+
+    String getSigner();
+
+    Boolean isHttps();
+
+    Integer getConnectionTimeout();
+
+    Integer getMaxErrorRetry();
+
+    Integer getSocketTimeout();
+
+    Boolean getUseTCPKeepAlive();
+
+    Integer getConnectionTtl();
+}
\ No newline at end of file
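
ClientOptions is a plain getter interface, so callers of the new S3Utils have to supply an implementation (in CloudStack that configuration normally comes from the image store settings). The hand-rolled implementation below is only an illustration: every value is a placeholder, and returning null simply means "fall back to the AWS SDK default".

    package com.cloud.utils.storage.S3;

    // Illustrative only: a fixed-value ClientOptions for tests or ad-hoc tooling.
    public class SimpleClientOptions implements ClientOptions {
        @Override public String getAccessKey() { return "ACCESS_KEY"; }          // placeholder
        @Override public String getSecretKey() { return "SECRET_KEY"; }          // placeholder
        @Override public String getEndPoint() { return "s3.example.org:9000"; }  // placeholder endpoint
        @Override public String getSigner() { return null; }                     // null -> SDK default signer
        @Override public Boolean isHttps() { return Boolean.TRUE; }
        @Override public Integer getConnectionTimeout() { return 60000; }        // milliseconds
        @Override public Integer getMaxErrorRetry() { return 3; }
        @Override public Integer getSocketTimeout() { return 60000; }            // milliseconds
        @Override public Boolean getUseTCPKeepAlive() { return Boolean.TRUE; }
        @Override public Integer getConnectionTtl() { return null; }             // null -> SDK default
    }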

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/storage/S3/FileNamingStrategy.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/storage/S3/FileNamingStrategy.java b/utils/src/main/java/com/cloud/utils/storage/S3/FileNamingStrategy.java
new file mode 100644
index 0000000..5c80e52
--- /dev/null
+++ b/utils/src/main/java/com/cloud/utils/storage/S3/FileNamingStrategy.java
@@ -0,0 +1,25 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.utils.storage.S3;
+
+public interface FileNamingStrategy {
+
+    String determineFileName(String key);
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/storage/S3/ObjectNamingStrategy.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/storage/S3/ObjectNamingStrategy.java b/utils/src/main/java/com/cloud/utils/storage/S3/ObjectNamingStrategy.java
new file mode 100644
index 0000000..04f3e87
--- /dev/null
+++ b/utils/src/main/java/com/cloud/utils/storage/S3/ObjectNamingStrategy.java
@@ -0,0 +1,27 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.utils.storage.S3;
+
+import java.io.File;
+
+public interface ObjectNamingStrategy {
+
+    String determineKey(File file);
+}
\ No newline at end of file
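
The two naming-strategy interfaces map between local files and S3 keys in opposite directions: ObjectNamingStrategy picks the key for an upload, FileNamingStrategy picks the local file name for a download. The anonymous implementations below are only a sketch; the "templates/" prefix is invented for the example.

    import java.io.File;

    import com.cloud.utils.storage.S3.FileNamingStrategy;
    import com.cloud.utils.storage.S3.ObjectNamingStrategy;

    public class NamingStrategyExample {
        // Prefix every uploaded file with an (illustrative) "templates/" folder in the bucket.
        static final ObjectNamingStrategy KEY_STRATEGY = new ObjectNamingStrategy() {
            @Override
            public String determineKey(File file) {
                return "templates/" + file.getName();
            }
        };

        // Strip the prefix again when turning a key back into a local file name.
        static final FileNamingStrategy FILE_STRATEGY = new FileNamingStrategy() {
            @Override
            public String determineFileName(String key) {
                return key.substring(key.lastIndexOf('/') + 1);
            }
        };
    }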

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/main/java/com/cloud/utils/storage/S3/S3Utils.java
----------------------------------------------------------------------
diff --git a/utils/src/main/java/com/cloud/utils/storage/S3/S3Utils.java b/utils/src/main/java/com/cloud/utils/storage/S3/S3Utils.java
new file mode 100644
index 0000000..274ff9b
--- /dev/null
+++ b/utils/src/main/java/com/cloud/utils/storage/S3/S3Utils.java
@@ -0,0 +1,216 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.utils.storage.S3;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.HttpMethod;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.BasicAWSCredentials;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.model.GetObjectRequest;
+import com.amazonaws.services.s3.model.ListObjectsRequest;
+import com.amazonaws.services.s3.model.ObjectListing;
+import com.amazonaws.services.s3.model.PutObjectRequest;
+import com.amazonaws.services.s3.model.S3ObjectInputStream;
+import com.amazonaws.services.s3.model.S3ObjectSummary;
+import com.amazonaws.services.s3.transfer.Download;
+import com.amazonaws.services.s3.transfer.TransferManager;
+import com.amazonaws.services.s3.transfer.Upload;
+import org.apache.log4j.Logger;
+
+import java.io.File;
+import java.io.InputStream;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static com.amazonaws.Protocol.HTTP;
+import static com.amazonaws.Protocol.HTTPS;
+import static java.lang.String.format;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.unmodifiableList;
+import static org.apache.commons.lang.StringUtils.isNotBlank;
+
+
+public final class S3Utils {
+
+    private static final Logger LOGGER = Logger.getLogger(S3Utils.class);
+
+    public static final String SEPARATOR = "/";
+
+    private static final Map<String, TransferManager> TRANSFERMANAGER_ACCESSKEY_MAP = new HashMap<>();
+
+    private S3Utils() {}
+
+    public static TransferManager getTransferManager(final ClientOptions clientOptions) {
+
+        if(TRANSFERMANAGER_ACCESSKEY_MAP.containsKey(clientOptions.getAccessKey())) {
+            return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
+        }
+
+        final AWSCredentials basicAWSCredentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
+
+        final ClientConfiguration configuration = new ClientConfiguration();
+
+        if (clientOptions.isHttps() != null) {
+            configuration.setProtocol(clientOptions.isHttps() ? HTTPS : HTTP);
+        }
+
+        if (clientOptions.getConnectionTimeout() != null) {
+            configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
+        }
+
+        if (clientOptions.getMaxErrorRetry() != null) {
+            configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
+        }
+
+        if (clientOptions.getSocketTimeout() != null) {
+            configuration.setSocketTimeout(clientOptions.getSocketTimeout());
+        }
+
+        if (clientOptions.getUseTCPKeepAlive() != null) {
+            configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
+        }
+
+        if (clientOptions.getConnectionTtl() != null) {
+            configuration.setConnectionTTL(clientOptions.getConnectionTtl());
+        }
+
+        if (clientOptions.getSigner() != null) {
+
+            configuration.setSignerOverride(clientOptions.getSigner());
+        }
+
+        LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, signer: %2$s, connectionTimeOut: %3$s, maxErrorRetry: %4$s, socketTimeout: %5$s, useTCPKeepAlive: %6$s, connectionTtl: %7$s]",
+                configuration.getProtocol(), configuration.getSignerOverride(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(), configuration.getSocketTimeout(),
+                clientOptions.getUseTCPKeepAlive(), clientOptions.getConnectionTtl()));
+
+        final AmazonS3Client client = new AmazonS3Client(basicAWSCredentials, configuration);
+
+        if (isNotBlank(clientOptions.getEndPoint())) {
+            LOGGER.debug(format("Setting the end point for S3 client with access key %1$s to %2$s.", clientOptions.getAccessKey(), clientOptions.getEndPoint()));
+
+            client.setEndpoint(clientOptions.getEndPoint());
+        }
+
+        TRANSFERMANAGER_ACCESSKEY_MAP.put(clientOptions.getAccessKey(), new TransferManager(client));
+
+        return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
+    }
+
+    public static AmazonS3 getAmazonS3Client(final ClientOptions clientOptions) {
+
+        return getTransferManager(clientOptions).getAmazonS3Client();
+    }
+
+    public static Upload putFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) {
+        LOGGER.debug(format("Sending file %1$s as S3 object %2$s in bucket %3$s", sourceFile.getName(), key, bucketName));
+
+        return getTransferManager(clientOptions).upload(bucketName, key, sourceFile);
+    }
+
+    public static Upload putObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key) {
+        LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s", key, bucketName));
+
+        return getTransferManager(clientOptions).upload(bucketName, key, sourceStream, null);
+    }
+
+    public static Upload putObject(final ClientOptions clientOptions, final PutObjectRequest req) {
+        LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s using PutObjectRequest", req.getKey(), req.getBucketName()));
+
+        return getTransferManager(clientOptions).upload(req);
+    }
+
+    public static Download getFile(final ClientOptions clientOptions, final String bucketName, final String key, final File file) {
+        LOGGER.debug(format("Receiving object %1$s as file %2$s from bucket %3$s", key, file.getAbsolutePath(), bucketName));
+
+        return getTransferManager(clientOptions).download(bucketName, key, file);
+    }
+
+    public static Download getFile(final ClientOptions clientOptions, final GetObjectRequest getObjectRequest, final File file) {
+        LOGGER.debug(format("Receiving object %1$s as file %2$s from bucket %3$s using GetObjectRequest", getObjectRequest.getKey(), file.getAbsolutePath(), getObjectRequest.getBucketName()));
+
+        return getTransferManager(clientOptions).download(getObjectRequest, file);
+    }
+
+    public static URL generatePresignedUrl(final ClientOptions clientOptions, final String bucketName, final String key, final Date expiration) {
+        LOGGER.debug(format("Generating presigned URL for key %1$s in bucket %2$s with expiration date %3$s", key, bucketName, expiration.toString()));
+
+        return getTransferManager(clientOptions).getAmazonS3Client().generatePresignedUrl(bucketName, key, expiration, HttpMethod.GET);
+    }
+
+    // Note that whenever an S3ObjectInputStream is returned, the caller must close it to avoid a resource leak.
+    public static S3ObjectInputStream getObjectStream(final ClientOptions clientOptions, final String bucketName, final String key) {
+        LOGGER.debug(format("Get S3ObjectInputStream from S3 Object %1$s in bucket %2$s", key, bucketName));
+
+        return getTransferManager(clientOptions).getAmazonS3Client().getObject(bucketName, key).getObjectContent();
+    }
+
+    public static List<S3ObjectSummary> listDirectory(final ClientOptions clientOptions, final String bucketName, final String directory) {
+        LOGGER.debug(format("Listing S3 directory %1$s in bucket %2$s", directory, bucketName));
+
+        List<S3ObjectSummary> objects = new ArrayList<>();
+        ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
+
+        listObjectsRequest.withBucketName(bucketName);
+        listObjectsRequest.withPrefix(directory);
+
+        ObjectListing ol = getAmazonS3Client(clientOptions).listObjects(listObjectsRequest);
+
+        // Collect the first page, then keep paging while the listing is truncated.
+        // Adding each page before requesting the next one ensures the final,
+        // non-truncated page of results is included as well.
+        objects.addAll(ol.getObjectSummaries());
+        while (ol.isTruncated()) {
+            listObjectsRequest.setMarker(ol.getNextMarker());
+            ol = getAmazonS3Client(clientOptions).listObjects(listObjectsRequest);
+            objects.addAll(ol.getObjectSummaries());
+        }
+
+        if (objects.isEmpty()) {
+            return emptyList();
+        }
+
+        return unmodifiableList(objects);
+    }
+
+    public static void deleteObject(final ClientOptions clientOptions, final String bucketName, final String key) {
+        LOGGER.debug(format("Deleting S3 Object %1$s in bucket %2$s", key, bucketName));
+
+        getAmazonS3Client(clientOptions).deleteObject(bucketName, key);
+    }
+
+    public static void deleteDirectory(final ClientOptions clientOptions, final String bucketName, final String directoryName) {
+        LOGGER.debug(format("Deleting S3 Directory %1$s in bucket %2$s", directoryName, bucketName));
+
+        final List<S3ObjectSummary> objects = listDirectory(clientOptions, bucketName, directoryName);
+
+        for (final S3ObjectSummary object : objects) {
+
+            deleteObject(clientOptions, bucketName, object.getKey());
+        }
+
+        deleteObject(clientOptions, bucketName, directoryName);
+    }
+}
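
The rewritten S3Utils hands every transfer to a cached TransferManager, so uploads and downloads come back as asynchronous Upload/Download handles and any returned stream has to be closed by the caller. The sketch below shows one plausible call sequence; SimpleClientOptions is the illustrative implementation sketched after ClientOptions.java above, and the bucket, key and file path are placeholders.

    import java.io.File;

    import com.amazonaws.services.s3.model.S3ObjectInputStream;
    import com.amazonaws.services.s3.transfer.Upload;

    import com.cloud.utils.storage.S3.ClientOptions;
    import com.cloud.utils.storage.S3.S3Utils;
    import com.cloud.utils.storage.S3.SimpleClientOptions;

    public class S3UtilsExample {
        public static void main(String[] args) throws Exception {
            final ClientOptions options = new SimpleClientOptions(); // illustrative options, see earlier sketch

            // Upload a file and block until the transfer finishes.
            final Upload upload = S3Utils.putFile(options, new File("/tmp/template.qcow2"), "backup-bucket", "templates/template.qcow2");
            upload.waitForCompletion();

            // Stream the object back; close the stream to release the underlying connection.
            try (S3ObjectInputStream in = S3Utils.getObjectStream(options, "backup-bucket", "templates/template.qcow2")) {
                // ... read from 'in' ...
            }
        }
    }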

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5c0366c9/utils/src/test/java/com/cloud/utils/rest/RESTServiceConnectorTest.java
----------------------------------------------------------------------
diff --git a/utils/src/test/java/com/cloud/utils/rest/RESTServiceConnectorTest.java b/utils/src/test/java/com/cloud/utils/rest/RESTServiceConnectorTest.java
index e26ee6a..63add63 100644
--- a/utils/src/test/java/com/cloud/utils/rest/RESTServiceConnectorTest.java
+++ b/utils/src/test/java/com/cloud/utils/rest/RESTServiceConnectorTest.java
@@ -32,6 +32,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.CollectionType;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.http.HttpEntity;
@@ -43,8 +45,6 @@ import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.message.BasicStatusLine;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.type.CollectionType;
 import org.junit.Test;
 
 import com.google.gson.FieldNamingPolicy;
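
With the Jackson upgrade, the test now builds its CollectionType through the com.fasterxml Jackson 2 TypeFactory rather than the retired org.codehaus classes. As a reminder of what that call chain looks like in Jackson 2.6, a small stand-alone sketch follows; the JSON payload and element type are invented for the example.

    import java.util.List;

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.type.CollectionType;

    public class JacksonCollectionTypeExample {
        public static void main(String[] args) throws Exception {
            final ObjectMapper mapper = new ObjectMapper();
            // Jackson 2 constructs parameterized types through the mapper's TypeFactory.
            final CollectionType listOfStrings = mapper.getTypeFactory().constructCollectionType(List.class, String.class);
            final List<String> names = mapper.readValue("[\"sec-group-1\",\"sec-group-2\"]", listOfStrings);
            System.out.println(names);
        }
    }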

