hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rohithsharm...@apache.org
Subject [32/50] [abbrv] hadoop git commit: HADOOP-12269. Update aws-sdk dependency to 1.10.6 (Thomas Demoor via Lei (Eddy) Xu)
Date Fri, 07 Aug 2015 05:18:59 GMT
HADOOP-12269. Update aws-sdk dependency to 1.10.6 (Thomas Demoor via Lei (Eddy) Xu)

(cherry picked from commit d5403747b57b1e294e533ce17f197e7be8f5339c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7b0f292
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7b0f292
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7b0f292

Branch: refs/heads/master
Commit: f7b0f292e722fa819900f455a070be1d7bf97072
Parents: b64d2ac
Author: Lei Xu <lei@cloudera.com>
Authored: Tue Aug 4 18:51:52 2015 -0700
Committer: Lei Xu <lei@cloudera.com>
Committed: Tue Aug 4 18:56:07 2015 -0700

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         |  6 +++++
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |  4 +--
 hadoop-project/pom.xml                          |  4 +--
 hadoop-tools/hadoop-aws/pom.xml                 |  2 +-
 .../org/apache/hadoop/fs/s3a/Constants.java     |  9 ++++---
 .../hadoop/fs/s3a/S3AFastOutputStream.java      |  2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 26 +++++++++++---------
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   | 15 +++++++----
 .../src/site/markdown/tools/hadoop-aws/index.md |  6 +++++
 9 files changed, 49 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ee42675..1e54e6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -867,6 +867,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.signing-algorithm</name>
+  <description>Override the default signing algorithm so legacy
+    implementations can still be used</description>
+</property>
+
+<property>
   <name>fs.s3a.buffer.dir</name>
   <value>${hadoop.tmp.dir}/s3a</value>
   <description>Comma separated list of directories that will be used to buffer file


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index c2fe2bf..f48b439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -132,7 +132,7 @@
         </exclusion>
         <exclusion>
           <groupId>com.amazonaws</groupId>
-          <artifactId>aws-java-sdk</artifactId>
+          <artifactId>aws-java-sdk-s3</artifactId>
         </exclusion>
         <exclusion>
           <groupId>org.eclipse.jdt</groupId>
@@ -175,7 +175,7 @@
         </exclusion>
         <exclusion>
           <groupId>com.amazonaws</groupId>
-          <artifactId>aws-java-sdk</artifactId>
+          <artifactId>aws-java-sdk-s3</artifactId>
         </exclusion>
         <exclusion>
           <groupId>org.eclipse.jdt</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6686018..0ce2896 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -656,8 +656,8 @@
       </dependency>
       <dependency>
         <groupId>com.amazonaws</groupId>
-        <artifactId>aws-java-sdk</artifactId>
-        <version>1.7.4</version>
+        <artifactId>aws-java-sdk-s3</artifactId>
+        <version>1.10.6</version>
       </dependency>
       <dependency>
         <groupId>org.apache.mina</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index efb3268..30126ac 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -119,7 +119,7 @@
 
     <dependency>
       <groupId>com.amazonaws</groupId>
-      <artifactId>aws-java-sdk</artifactId>
+      <artifactId>aws-java-sdk-s3</artifactId>
       <scope>compile</scope>
     </dependency>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 3486dfb..fe8dd77 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -82,8 +82,8 @@ public class Constants {
   
   // minimum size in bytes before we start a multipart uploads or copy
   public static final String MIN_MULTIPART_THRESHOLD = "fs.s3a.multipart.threshold";
-  public static final int DEFAULT_MIN_MULTIPART_THRESHOLD = Integer.MAX_VALUE;
-  
+  public static final long DEFAULT_MIN_MULTIPART_THRESHOLD = Integer.MAX_VALUE;
+
   // comma separated list of directories
   public static final String BUFFER_DIR = "fs.s3a.buffer.dir";
 
@@ -111,7 +111,10 @@ public class Constants {
   // s3 server-side encryption
   public static final String SERVER_SIDE_ENCRYPTION_ALGORITHM = 
     "fs.s3a.server-side-encryption-algorithm";
-  
+
+  //override signature algorithm used for signing requests
+  public static final String SIGNING_ALGORITHM = "fs.s3a.signing-algorithm";
+
   public static final String S3N_FOLDER_SUFFIX = "_$folder$";
   public static final String FS_S3A_BLOCK_SIZE = "fs.s3a.block.size";
   public static final String FS_S3A = "s3a";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
index 6819581..2e06fba 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
@@ -267,7 +267,7 @@ public class S3AFastOutputStream extends OutputStream {
   private ObjectMetadata createDefaultMetadata() {
     ObjectMetadata om = new ObjectMetadata();
     if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
-      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
+      om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
     }
     return om;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 91a606c..f9e937f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -87,7 +87,7 @@ public class S3AFileSystem extends FileSystem {
   private long partSize;
   private TransferManager transfers;
   private ThreadPoolExecutor threadPoolExecutor;
-  private int multiPartThreshold;
+  private long multiPartThreshold;
   public static final Logger LOG = LoggerFactory.getLogger(S3AFileSystem.class);
   private CannedAccessControlList cannedACL;
   private String serverSideEncryptionAlgorithm;
@@ -191,8 +191,12 @@ public class S3AFileSystem extends FileSystem {
         DEFAULT_ESTABLISH_TIMEOUT));
     awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
       DEFAULT_SOCKET_TIMEOUT));
+    String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
+    if(!signerOverride.isEmpty()) {
+      awsConf.setSignerOverride(signerOverride);
+    }
 
-    String proxyHost = conf.getTrimmed(PROXY_HOST,"");
+    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
     int proxyPort = conf.getInt(PROXY_PORT, -1);
     if (!proxyHost.isEmpty()) {
       awsConf.setProxyHost(proxyHost);
@@ -246,7 +250,7 @@ public class S3AFileSystem extends FileSystem {
 
     maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
     partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
-    multiPartThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD,
+    multiPartThreshold = conf.getLong(MIN_MULTIPART_THRESHOLD,
       DEFAULT_MIN_MULTIPART_THRESHOLD);
 
     if (partSize < 5 * 1024 * 1024) {
@@ -403,7 +407,7 @@ public class S3AFileSystem extends FileSystem {
     if (getConf().getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD)) {
       return new FSDataOutputStream(new S3AFastOutputStream(s3, this, bucket,
           key, progress, statistics, cannedACL,
-          serverSideEncryptionAlgorithm, partSize, (long)multiPartThreshold,
+          serverSideEncryptionAlgorithm, partSize, multiPartThreshold,
           threadPoolExecutor), statistics);
     }
    // We pass null to FSDataOutputStream so it won't count writes that are being buffered to a file
@@ -1027,7 +1031,7 @@ public class S3AFileSystem extends FileSystem {
 
     final ObjectMetadata om = new ObjectMetadata();
     if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
-      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
+      om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
     }
     PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
     putObjectRequest.setCannedAcl(cannedACL);
@@ -1035,8 +1039,8 @@ public class S3AFileSystem extends FileSystem {
 
     ProgressListener progressListener = new ProgressListener() {
       public void progressChanged(ProgressEvent progressEvent) {
-        switch (progressEvent.getEventCode()) {
-          case ProgressEvent.PART_COMPLETED_EVENT_CODE:
+        switch (progressEvent.getEventType()) {
+          case TRANSFER_PART_COMPLETED_EVENT:
             statistics.incrementWriteOps(1);
             break;
           default:
@@ -1091,7 +1095,7 @@ public class S3AFileSystem extends FileSystem {
     ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
     final ObjectMetadata dstom = srcom.clone();
     if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
-      dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
+      dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
     }
     CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
     copyObjectRequest.setCannedAccessControlList(cannedACL);
@@ -1099,8 +1103,8 @@ public class S3AFileSystem extends FileSystem {
 
     ProgressListener progressListener = new ProgressListener() {
       public void progressChanged(ProgressEvent progressEvent) {
-        switch (progressEvent.getEventCode()) {
-          case ProgressEvent.PART_COMPLETED_EVENT_CODE:
+        switch (progressEvent.getEventType()) {
+          case TRANSFER_PART_COMPLETED_EVENT:
             statistics.incrementWriteOps(1);
             break;
           default:
@@ -1187,7 +1191,7 @@ public class S3AFileSystem extends FileSystem {
     final ObjectMetadata om = new ObjectMetadata();
     om.setContentLength(0L);
     if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
-      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
+      om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
     }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
     putObjectRequest.setCannedAcl(cannedACL);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
index 2b611b6..3e079f2 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3a;
 
 import com.amazonaws.event.ProgressEvent;
+import com.amazonaws.event.ProgressEventType;
 import com.amazonaws.event.ProgressListener;
 import com.amazonaws.services.s3.AmazonS3Client;
 import com.amazonaws.services.s3.model.CannedAccessControlList;
@@ -41,6 +42,8 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import static com.amazonaws.event.ProgressEventType.TRANSFER_COMPLETED_EVENT;
+import static com.amazonaws.event.ProgressEventType.TRANSFER_PART_STARTED_EVENT;
 import static org.apache.hadoop.fs.s3a.Constants.*;
 
 public class S3AOutputStream extends OutputStream {
@@ -52,7 +55,7 @@ public class S3AOutputStream extends OutputStream {
   private TransferManager transfers;
   private Progressable progress;
   private long partSize;
-  private int partSizeThreshold;
+  private long partSizeThreshold;
   private S3AFileSystem fs;
   private CannedAccessControlList cannedACL;
   private FileSystem.Statistics statistics;
@@ -76,7 +79,8 @@ public class S3AOutputStream extends OutputStream {
     this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm;
 
     partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
-    partSizeThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
+    partSizeThreshold = conf.getLong(MIN_MULTIPART_THRESHOLD,
+        DEFAULT_MIN_MULTIPART_THRESHOLD);
 
     if (conf.get(BUFFER_DIR, null) != null) {
       lDirAlloc = new LocalDirAllocator(BUFFER_DIR);
@@ -116,7 +120,7 @@ public class S3AOutputStream extends OutputStream {
     try {
       final ObjectMetadata om = new ObjectMetadata();
       if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
-        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
+        om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
       }
       PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
       putObjectRequest.setCannedAcl(cannedACL);
@@ -184,8 +188,9 @@ public class S3AOutputStream extends OutputStream {
       }
 
       // There are 3 http ops here, but this should be close enough for now
-      if (progressEvent.getEventCode() == ProgressEvent.PART_STARTED_EVENT_CODE ||
-          progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE) {
+      ProgressEventType pet = progressEvent.getEventType();
+      if (pet == TRANSFER_PART_STARTED_EVENT ||
+          pet == TRANSFER_COMPLETED_EVENT) {
         statistics.incrementWriteOps(1);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b0f292/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index e0389c0..5d45e0a 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -289,6 +289,12 @@ If you do any of these: change your credentials immediately!
     </property>
 
     <property>
+      <name>fs.s3a.signing-algorithm</name>
+      <description>Override the default signing algorithm so legacy
+        implementations can still be used</description>
+    </property>
+
+    <property>
       <name>fs.s3a.buffer.dir</name>
       <value>${hadoop.tmp.dir}/s3a</value>
      <description>Comma separated list of directories that will be used to buffer file


Mime
View raw message