hadoop-common-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1608603 [2/3] - in /hadoop/common/branches/fs-encryption/hadoop-common-project: hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/ hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/ hadoop-...
Date: Mon, 07 Jul 2014 20:44:06 GMT
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java Mon Jul  7 20:43:56 2014
@@ -22,6 +22,7 @@ import static org.apache.hadoop.fs.s3nat
 
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -32,17 +33,19 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.s3.S3Credentials;
 import org.apache.hadoop.fs.s3.S3Exception;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
 import org.jets3t.service.ServiceException;
 import org.jets3t.service.StorageObjectsChunk;
+import org.jets3t.service.impl.rest.HttpException;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.MultipartPart;
 import org.jets3t.service.model.MultipartUpload;
@@ -51,6 +54,8 @@ import org.jets3t.service.model.S3Object
 import org.jets3t.service.model.StorageObject;
 import org.jets3t.service.security.AWSCredentials;
 import org.jets3t.service.utils.MultipartUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -66,8 +71,8 @@ class Jets3tNativeFileSystemStore implem
 
   private String serverSideEncryptionAlgorithm;
   
-  public static final Log LOG =
-      LogFactory.getLog(Jets3tNativeFileSystemStore.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Jets3tNativeFileSystemStore.class);
 
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
@@ -79,7 +84,7 @@ class Jets3tNativeFileSystemStore implem
             s3Credentials.getSecretAccessKey());
       this.s3Service = new RestS3Service(awsCredentials);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     multipartEnabled =
         conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
@@ -115,16 +120,10 @@ class Jets3tNativeFileSystemStore implem
         object.setMd5Hash(md5Hash);
       }
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     } finally {
-      if (in != null) {
-        try {
-          in.close();
-        } catch (IOException e) {
-          // ignore
-        }
-      }
+      IOUtils.closeStream(in);
     }
   }
 
@@ -147,10 +146,8 @@ class Jets3tNativeFileSystemStore implem
     try {
       mpUtils.uploadObjects(bucket.getName(), s3Service,
                             objectsToUploadAsMultipart, null);
-    } catch (ServiceException e) {
-      handleServiceException(e);
     } catch (Exception e) {
-      throw new S3Exception(e);
+      handleException(e, key);
     }
   }
   
@@ -163,8 +160,8 @@ class Jets3tNativeFileSystemStore implem
       object.setContentLength(0);
       object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     }
   }
 
@@ -172,20 +169,21 @@ class Jets3tNativeFileSystemStore implem
   public FileMetadata retrieveMetadata(String key) throws IOException {
     StorageObject object = null;
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting metadata for key: {} from bucket: {}",
+          key, bucket.getName());
       object = s3Service.getObjectDetails(bucket.getName(), key);
       return new FileMetadata(key, object.getContentLength(),
           object.getLastModifiedDate().getTime());
 
     } catch (ServiceException e) {
-      // Following is brittle. Is there a better way?
-      if ("NoSuchKey".equals(e.getErrorCode())) {
-        return null; //return null if key not found
+      try {
+        // translate the exception; handleException() always throws
+        handleException(e, key);
+        return null;
+      } catch (FileNotFoundException fnfe) {
+        // ...but downgrade missing files to a null result
+        return null;
       }
-      handleServiceException(e);
-      return null; //never returned - keep compiler happy
     } finally {
       if (object != null) {
         object.closeDataInputStream();
@@ -204,13 +202,12 @@ class Jets3tNativeFileSystemStore implem
   @Override
   public InputStream retrieve(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting key: {} from bucket: {}",
+          key, bucket.getName());
       S3Object object = s3Service.getObject(bucket.getName(), key);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
       return null; //return null if key not found
     }
   }
@@ -228,15 +225,14 @@ class Jets3tNativeFileSystemStore implem
   public InputStream retrieve(String key, long byteRangeStart)
           throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName() + " with byteRangeStart: " + byteRangeStart);
-      }
+      LOG.debug("Getting key: {} from bucket: {} with byteRangeStart: {}",
+          key, bucket.getName(), byteRangeStart);
       S3Object object = s3Service.getObject(bucket, key, null, null, null,
                                             null, byteRangeStart, null);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
-      return null; //return null if key not found
+      handleException(e, key);
+      return null;
     }
   }
 
@@ -254,17 +250,19 @@ class Jets3tNativeFileSystemStore implem
   }
 
   /**
-   *
-   * @return
-   * This method returns null if the list could not be populated
-   * due to S3 giving ServiceException
-   * @throws IOException
+   * List bucket objects under a prefix.
+   * @param prefix key prefix
+   * @param delimiter path delimiter
+   * @param maxListingLength maximum number of entries to return
+   * @param priorLastKey last key of any previous listing
+   * @return a partial listing of matches
+   * @throws IOException on any reported failure
    */
 
   private PartialListing list(String prefix, String delimiter,
       int maxListingLength, String priorLastKey) throws IOException {
     try {
-      if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
+      if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
       StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
@@ -279,24 +277,20 @@ class Jets3tNativeFileSystemStore implem
       }
       return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
           chunk.getCommonPrefixes());
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
-      return null; //never returned - keep compiler happy
     } catch (ServiceException e) {
-      handleServiceException(e);
-      return null; //return null if list could not be populated
+      handleException(e, prefix);
+      return null; // never returned - keep compiler happy
     }
   }
 
   @Override
   public void delete(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Deleting key:" + key + "from bucket" + bucket.getName());
-      }
+      LOG.debug("Deleting key: {} from bucket: {}",
+          key, bucket.getName());
       s3Service.deleteObject(bucket, key);
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
     }
   }
 
@@ -304,7 +298,7 @@ class Jets3tNativeFileSystemStore implem
     try {
       s3Service.renameObject(bucket.getName(), srcKey, new S3Object(dstKey));
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcKey);
     }
   }
   
@@ -329,7 +323,7 @@ class Jets3tNativeFileSystemStore implem
       s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
           dstObject, false);
     } catch (ServiceException e) {
-      handleServiceException(srcKey, e);
+      handleException(e, srcKey);
     }
   }
 
@@ -364,19 +358,22 @@ class Jets3tNativeFileSystemStore implem
       Collections.reverse(listedParts);
       s3Service.multipartCompleteUpload(multipartUpload, listedParts);
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcObject.getKey());
     }
   }
 
   @Override
   public void purge(String prefix) throws IOException {
+    String key = "";
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), prefix, null);
       for (S3Object object : objects) {
-        s3Service.deleteObject(bucket, object.getKey());
+        key = object.getKey();
+        s3Service.deleteObject(bucket, key);
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e, key);
     }
   }
 
@@ -390,39 +387,97 @@ class Jets3tNativeFileSystemStore implem
         sb.append(object.getKey()).append("\n");
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     System.out.println(sb);
   }
 
-  private void handleServiceException(String key, ServiceException e) throws IOException {
-    if ("NoSuchKey".equals(e.getErrorCode())) {
-      throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
-    } else {
-      handleServiceException(e);
-    }
+  /**
+   * Handle any service exception by translating it into an IOException.
+   * @param e exception to handle
+   * @throws IOException the translated exception; always thrown
+   */
+  private void handleException(Exception e) throws IOException {
+    throw processException(e, e, "");
   }
+  /**
+   * Handle any service exception by translating it into an IOException.
+   * @param e exception to handle
+   * @param key key sought from object store
 
-  private void handleS3ServiceException(S3ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("S3 Error code: " + e.getS3ErrorCode() + "; S3 Error message: " + e.getS3ErrorMessage());
-      }
-      throw new S3Exception(e);
-    }
+   * @throws IOException the translated exception; always thrown
+   */
+  private void handleException(Exception e, String key) throws IOException {
+    throw processException(e, e, key);
   }
 
-  private void handleServiceException(ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
-      }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param thrown exception at the current level of the cause chain
+   * @param original original exception, wrapped and thrown if no other
+   * translation could be made
+   * @param key key sought from object store or "" for undefined
+   * @return the exception to throw; never null
+   */
+  private IOException processException(Throwable thrown, Throwable original,
+      String key) {
+    IOException result;
+    if (thrown.getCause() != null) {
+      // recurse down
+      result = processException(thrown.getCause(), original, key);
+    } else if (thrown instanceof HttpException) {
+      // nested HttpException - examine error code and react
+      HttpException httpException = (HttpException) thrown;
+      String responseMessage = httpException.getResponseMessage();
+      int responseCode = httpException.getResponseCode();
+      String bucketName = "s3n://" + bucket.getName();
+      String text = String.format("%s : %03d : %s",
+          bucketName,
+          responseCode,
+          responseMessage);
+      String filename = !key.isEmpty() ? (bucketName + "/" + key) : text;
+      switch (responseCode) {
+        case 404:
+          result = new FileNotFoundException(filename);
+          break;
+        case 416: // invalid range
+          result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+                                    +": " + filename);
+          break;
+        case 403: //forbidden
+          result = new AccessControlException("Permission denied"
+                                    +": " + filename);
+          break;
+        default:
+          result = new IOException(text);
+      }
+      result.initCause(thrown);
+    } else if (thrown instanceof S3ServiceException) {
+      S3ServiceException se = (S3ServiceException) thrown;
+      LOG.debug(
+          "S3ServiceException: {}: {} : {}",
+          se.getS3ErrorCode(), se.getS3ErrorMessage(), se, se);
+      if ("InvalidRange".equals(se.getS3ErrorCode())) {
+        result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+      } else {
+        result = new S3Exception(se);
+      }
+    } else if (thrown instanceof ServiceException) {
+      ServiceException se = (ServiceException) thrown;
+      LOG.debug("S3ServiceException: {}: {} : {}",
+          se.getErrorCode(), se.toString(), se, se);
+      result = new S3Exception(se);
+    } else if (thrown instanceof IOException) {
+      result = (IOException) thrown;
+    } else {
+      // no exception has been derived yet: there is no inner cause,
+      // and no translation was made at any level of the chain.
+      // convert the original exception to an IOException, rather than
+      // just the exception at the base of the cause tree
+      result = new S3Exception(original);
     }
+
+    return result;
   }
 }

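The patch above collapses the overlapping handleServiceException and handleS3ServiceException variants into a single processException translator that recurses down the cause chain and maps S3/HTTP failures onto standard java.io and Hadoop exceptions: 404 becomes FileNotFoundException, 416 (invalid range) becomes EOFException, and 403 becomes AccessControlException. A minimal, self-contained sketch of the same translation pattern (class, method, and message text here are illustrative, not the patch's exact code):

    import java.io.EOFException;
    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.security.AccessControlException;

    final class HttpErrorTranslator {
      // Map an HTTP status plus path into the narrowest matching
      // IOException, preserving the original failure as the cause.
      static IOException translate(int status, String path, Throwable cause) {
        IOException ioe;
        switch (status) {
          case 404: ioe = new FileNotFoundException(path); break;
          case 416: ioe = new EOFException("Attempted read past EOF: " + path); break;
          case 403: ioe = new AccessControlException("Permission denied: " + path); break;
          default:  ioe = new IOException("HTTP " + status + ": " + path);
        }
        ioe.initCause(cause);
        return ioe;
      }
    }

Callers can then throw one translated IOException instead of special-casing each JetS3t exception type at every call site.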
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java Mon Jul  7 20:43:56 2014
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3native;
 
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -37,15 +38,16 @@ import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -55,6 +57,8 @@ import org.apache.hadoop.io.retry.RetryP
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -81,8 +85,8 @@ import org.apache.hadoop.util.Progressab
 @InterfaceStability.Stable
 public class NativeS3FileSystem extends FileSystem {
   
-  public static final Log LOG = 
-    LogFactory.getLog(NativeS3FileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NativeS3FileSystem.class);
   
   private static final String FOLDER_SUFFIX = "_$folder$";
   static final String PATH_DELIMITER = Path.SEPARATOR;
@@ -97,6 +101,7 @@ public class NativeS3FileSystem extends 
     private long pos = 0;
     
     public NativeS3FsInputStream(NativeFileSystemStore store, Statistics statistics, InputStream in, String key) {
+      Preconditions.checkNotNull(in, "Null input stream");
       this.store = store;
       this.statistics = statistics;
       this.in = in;
@@ -105,13 +110,20 @@ public class NativeS3FileSystem extends 
     
     @Override
     public synchronized int read() throws IOException {
-      int result = -1;
+      int result;
       try {
         result = in.read();
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
-        seek(pos);
-        result = in.read();
+        LOG.info("Received IOException while reading '{}', attempting to reopen",
+            key);
+        LOG.debug("{}", e, e);
+        try {
+          seek(pos);
+          result = in.read();
+        } catch (EOFException eof) {
+          LOG.debug("EOF on input stream read: {}", eof, eof);
+          result = -1;
+        }
       } 
       if (result != -1) {
         pos++;
@@ -124,12 +136,17 @@ public class NativeS3FileSystem extends 
     @Override
     public synchronized int read(byte[] b, int off, int len)
       throws IOException {
-      
+      if (in == null) {
+        throw new EOFException("Cannot read closed stream");
+      }
       int result = -1;
       try {
         result = in.read(b, off, len);
+      } catch (EOFException eof) {
+        throw eof;
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
+        LOG.info( "Received IOException while reading '{}'," +
+                  " attempting to reopen.", key);
         seek(pos);
         result = in.read(b, off, len);
       }
@@ -143,17 +160,53 @@ public class NativeS3FileSystem extends 
     }
 
     @Override
-    public void close() throws IOException {
-      in.close();
+    public synchronized void close() throws IOException {
+      closeInnerStream();
+    }
+
+    /**
+     * Close the inner stream if not null. Even if an exception
+     * is raised during the close, the field is set to null
+     * @throws IOException if raised by the close() operation.
+     */
+    private void closeInnerStream() throws IOException {
+      if (in != null) {
+        try {
+          in.close();
+        } finally {
+          in = null;
+        }
+      }
+    }
+
+    /**
+     * Update inner stream with a new stream and position
+     * @param newStream new stream -must not be null
+     * @param newpos new position
+     * @throws IOException IO exception on a failure to close the existing
+     * stream.
+     */
+    private synchronized void updateInnerStream(InputStream newStream, long newpos) throws IOException {
+      Preconditions.checkNotNull(newStream, "Null newstream argument");
+      closeInnerStream();
+      in = newStream;
+      this.pos = newpos;
     }
 
     @Override
-    public synchronized void seek(long pos) throws IOException {
-      in.close();
-      LOG.info("Opening key '" + key + "' for reading at position '" + pos + "'");
-      in = store.retrieve(key, pos);
-      this.pos = pos;
+    public synchronized void seek(long newpos) throws IOException {
+      if (newpos < 0) {
+        throw new EOFException(
+            FSExceptionMessages.NEGATIVE_SEEK);
+      }
+      if (pos != newpos) {
+        // the seek is attempting to move the current position
+        LOG.debug("Opening key '{}' for reading at position '{}", key, newpos);
+        InputStream newStream = store.retrieve(key, newpos);
+        updateInnerStream(newStream, newpos);
+      }
     }
+
     @Override
     public synchronized long getPos() throws IOException {
       return pos;
@@ -214,7 +267,7 @@ public class NativeS3FileSystem extends 
       }
 
       backupStream.close();
-      LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
+      LOG.info("OutputStream for key '{}' closed. Now beginning upload", key);
       
       try {
         byte[] md5Hash = digest == null ? null : digest.digest();
@@ -226,7 +279,7 @@ public class NativeS3FileSystem extends 
         super.close();
         closed = true;
       } 
-      LOG.info("OutputStream for key '" + key + "' upload complete");
+      LOG.info("OutputStream for key '{}' upload complete", key);
     }
 
     @Override
@@ -339,7 +392,7 @@ public class NativeS3FileSystem extends 
       Progressable progress) throws IOException {
 
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists:"+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     
     if(LOG.isDebugEnabled()) {
@@ -367,7 +420,7 @@ public class NativeS3FileSystem extends 
     String key = pathToKey(absolutePath);
     if (status.isDirectory()) {
       if (!recurse && listStatus(f).length > 0) {
-        throw new IOException("Can not delete " + f + " at is a not empty directory and recurse option is false");
+        throw new IOException("Can not delete " + f + " as is a not empty directory and recurse option is false");
       }
 
       createParent(f);
@@ -538,7 +591,7 @@ public class NativeS3FileSystem extends 
     try {
       FileStatus fileStatus = getFileStatus(f);
       if (fileStatus.isFile()) {
-        throw new IOException(String.format(
+        throw new FileAlreadyExistsException(String.format(
             "Can't make directory for path '%s' since it is a file.", f));
 
       }
@@ -556,7 +609,7 @@ public class NativeS3FileSystem extends 
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
     if (fs.isDirectory()) {
-      throw new IOException("'" + f + "' is a directory");
+      throw new FileNotFoundException("'" + f + "' is a directory");
     }
     LOG.info("Opening '" + f + "' for reading");
     Path absolutePath = makeAbsolute(f);

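Three behavioral notes on the input-stream changes above: seek() is now lazy (a no-op when the requested position equals the current one, where previously every seek closed and re-opened the S3 connection), close() is idempotent because the inner stream is nulled out, and a read() that still hits EOF after the one reopen attempt downgrades the EOFException to the conventional -1 return. A hedged caller-side sketch (bucket and key names are illustrative):

    // After this change, the seek(0) right after open() is free; before,
    // it forced a second HTTP GET for a stream already at offset 0.
    FSDataInputStream in = fs.open(new Path("s3n://bucket/data.bin"));
    in.seek(0);                        // position unchanged: no reopen
    byte[] buf = new byte[4096];
    int n = in.read(buf, 0, buf.length);
    in.seek(n);                        // position unchanged after read: no reopen
    in.close();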
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java Mon Jul  7 20:43:56 2014
@@ -267,6 +267,9 @@ abstract class CommandWithDestination ex
         dst.refreshStatus(); // need to update stat to know it exists now
       }      
       super.recursePath(src);
+      if (dst.stat.isDirectory()) {
+        preserveAttributes(src, dst);
+      }
     } finally {
       dst = savedDst;
     }
@@ -298,44 +301,7 @@ abstract class CommandWithDestination ex
     try {
       in = src.fs.open(src.path);
       copyStreamToTarget(in, target);
-      if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
-        target.fs.setTimes(
-          target.path,
-          src.stat.getModificationTime(),
-          src.stat.getAccessTime());
-      }
-      if (shouldPreserve(FileAttribute.OWNERSHIP)) {
-        target.fs.setOwner(
-          target.path,
-          src.stat.getOwner(),
-          src.stat.getGroup());
-      }
-      if (shouldPreserve(FileAttribute.PERMISSION) ||
-          shouldPreserve(FileAttribute.ACL)) {
-        target.fs.setPermission(
-          target.path,
-          src.stat.getPermission());
-      }
-      if (shouldPreserve(FileAttribute.ACL)) {
-        FsPermission perm = src.stat.getPermission();
-        if (perm.getAclBit()) {
-          List<AclEntry> srcEntries =
-              src.fs.getAclStatus(src.path).getEntries();
-          List<AclEntry> srcFullEntries =
-              AclUtil.getAclFromPermAndEntries(perm, srcEntries);
-          target.fs.setAcl(target.path, srcFullEntries);
-        }
-      }
-      if (shouldPreserve(FileAttribute.XATTR)) {
-        Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
-        if (srcXAttrs != null) {
-          Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
-          while (iter.hasNext()) {
-            Entry<String, byte[]> entry = iter.next();
-            target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
-          }
-        }
-      }
+      preserveAttributes(src, target);
     } finally {
       IOUtils.closeStream(in);
     }
@@ -365,6 +331,56 @@ abstract class CommandWithDestination ex
     }
   }
 
+  /**
+   * Preserve the attributes of the source to the target.
+   * The method calls {@link #shouldPreserve(FileAttribute)} to check what
+   * attribute to preserve.
+   * @param src source to preserve
+   * @param target where to preserve attributes
+   * @throws IOException if fails to preserve attributes
+   */
+  protected void preserveAttributes(PathData src, PathData target)
+      throws IOException {
+    if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
+      target.fs.setTimes(
+          target.path,
+          src.stat.getModificationTime(),
+          src.stat.getAccessTime());
+    }
+    if (shouldPreserve(FileAttribute.OWNERSHIP)) {
+      target.fs.setOwner(
+          target.path,
+          src.stat.getOwner(),
+          src.stat.getGroup());
+    }
+    if (shouldPreserve(FileAttribute.PERMISSION) ||
+        shouldPreserve(FileAttribute.ACL)) {
+      target.fs.setPermission(
+          target.path,
+          src.stat.getPermission());
+    }
+    if (shouldPreserve(FileAttribute.ACL)) {
+      FsPermission perm = src.stat.getPermission();
+      if (perm.getAclBit()) {
+        List<AclEntry> srcEntries =
+            src.fs.getAclStatus(src.path).getEntries();
+        List<AclEntry> srcFullEntries =
+            AclUtil.getAclFromPermAndEntries(perm, srcEntries);
+        target.fs.setAcl(target.path, srcFullEntries);
+      }
+    }
+    if (shouldPreserve(FileAttribute.XATTR)) {
+      Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
+      if (srcXAttrs != null) {
+        Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
+        while (iter.hasNext()) {
+          Entry<String, byte[]> entry = iter.next();
+          target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+        }
+      }
+    }
+  }
+
   // Helper filter filesystem that registers created files as temp files to
   // be deleted on exit unless successfully renamed
   private static class TargetFileSystem extends FilterFileSystem {

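The inlined attribute-preservation block is extracted into preserveAttributes(src, target) and, via the new call in recursePath, now runs for directories after their contents are copied, so -p copies preserve directory attributes too. The ordering matters: copying children into a directory updates its modification time, so timestamps must be re-applied last. A sketch of that ordering, with hypothetical helper names:

    // Hypothetical outline of the recursive copy; only the ordering is
    // the point: attributes are preserved after the children land.
    void copyDirectory(PathData src, PathData dst) throws IOException {
      ensureDirectory(dst);
      for (PathData child : children(src)) {
        copyInto(child, dst);          // may bump dst's mtime
      }
      preserveAttributes(src, dst);    // so timestamps are re-applied last
    }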
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java Mon Jul  7 20:43:56 2014
@@ -19,21 +19,16 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
-import java.net.URI;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.LinkedList;
-import java.util.Set;
 import org.apache.hadoop.util.StringUtils;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-import com.google.common.collect.Sets;
-
 /**
  * Get a listing of all files in that match the file patterns.
  */
@@ -70,7 +65,6 @@ class Ls extends FsCommand {
   protected boolean dirRecurse;
 
   protected boolean humanReadable = false;
-  private Set<URI> aclNotSupportedFsSet = Sets.newHashSet();
 
   protected String formatSize(long size) {
     return humanReadable

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java Mon Jul  7 20:43:56 2014
@@ -104,6 +104,9 @@ class MoveCommands {
         throw new PathIOException(src.toString(),
             "Does not match target filesystem");
       }
+      if (target.exists) {
+        throw new PathExistsException(target.toString());
+      }
       if (!target.fs.rename(src.path, target.path)) {
         // we have no way to know the actual error...
         throw new PathIOException(src.toString());

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java Mon Jul  7 20:43:56 2014
@@ -19,12 +19,13 @@
 package org.apache.hadoop.io.compress.zlib;
 
 import java.io.IOException;
+import java.util.zip.Checksum;
 import java.util.zip.DataFormatException;
 import java.util.zip.Inflater;
 
-import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DoNotPool;
+import org.apache.hadoop.util.DataChecksum;
 
 /**
  * A {@link Decompressor} based on the popular gzip compressed file format.
@@ -54,7 +55,7 @@ public class BuiltInGzipDecompressor imp
   private int headerBytesRead = 0;
   private int trailerBytesRead = 0;
   private int numExtraFieldBytesRemaining = -1;
-  private PureJavaCrc32 crc = new PureJavaCrc32();
+  private Checksum crc = DataChecksum.newCrc32();
   private boolean hasExtraField = false;
   private boolean hasFilename = false;
   private boolean hasComment = false;

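Replacing the hard-wired PureJavaCrc32 field with a java.util.zip.Checksum obtained from DataChecksum.newCrc32() moves the choice of CRC implementation into one factory, so the decompressor can transparently pick up a faster (e.g. JDK-intrinsified) CRC32 where one is available. A self-contained sketch of coding to the Checksum interface:

    import java.util.zip.CRC32;
    import java.util.zip.Checksum;

    public class CrcDemo {
      public static void main(String[] args) {
        // The concrete class is chosen in one place; everything else
        // sees only the Checksum interface, as in the patch above.
        Checksum crc = new CRC32();    // stand-in for DataChecksum.newCrc32()
        byte[] data = "hello".getBytes();
        crc.update(data, 0, data.length);
        System.out.printf("crc32=%08x%n", crc.getValue());
      }
    }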
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java Mon Jul  7 20:43:56 2014
@@ -379,6 +379,7 @@ public class Client {
     private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
     private final RetryPolicy connectionRetryPolicy;
+    private final int maxRetriesOnSasl;
     private int maxRetriesOnSocketTimeouts;
     private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
     private boolean doPing; //do we need to send ping message
@@ -406,6 +407,7 @@ public class Client {
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.maxIdleTime = remoteId.getMaxIdleTime();
       this.connectionRetryPolicy = remoteId.connectionRetryPolicy;
+      this.maxRetriesOnSasl = remoteId.getMaxRetriesOnSasl();
       this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.doPing = remoteId.getDoPing();
@@ -693,7 +695,6 @@ public class Client {
           LOG.debug("Connecting to "+server);
         }
         short numRetries = 0;
-        final short MAX_RETRIES = 5;
         Random rand = null;
         while (true) {
           setupConnection();
@@ -721,8 +722,8 @@ public class Client {
               if (rand == null) {
                 rand = new Random();
               }
-              handleSaslConnectionFailure(numRetries++, MAX_RETRIES, ex, rand,
-                  ticket);
+              handleSaslConnectionFailure(numRetries++, maxRetriesOnSasl, ex,
+                  rand, ticket);
               continue;
             }
             if (authMethod != AuthMethod.SIMPLE) {
@@ -1478,6 +1479,7 @@ public class Client {
     private final int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
     private final RetryPolicy connectionRetryPolicy;
+    private final int maxRetriesOnSasl;
     // the max. no. of retries for socket connections on time out exceptions
     private final int maxRetriesOnSocketTimeouts;
     private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -1498,6 +1500,9 @@ public class Client {
       this.maxIdleTime = conf.getInt(
           CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
           CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
+      this.maxRetriesOnSasl = conf.getInt(
+          CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
+          CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
       this.maxRetriesOnSocketTimeouts = conf.getInt(
           CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
           CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
@@ -1531,6 +1536,10 @@ public class Client {
       return maxIdleTime;
     }
     
+    public int getMaxRetriesOnSasl() {
+      return maxRetriesOnSasl;
+    }
+
     /** max connection retries on socket time outs */
     public int getMaxRetriesOnSocketTimeouts() {
       return maxRetriesOnSocketTimeouts;

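The Client change replaces the hard-coded MAX_RETRIES = 5 for SASL connection failures with a per-connection value read from IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY. Assuming that constant follows the usual ipc.client.* naming, a deployment could raise the limit like this (the literal key string is an assumption, not confirmed by the diff):

    Configuration conf = new Configuration();
    // assumed literal behind IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY
    conf.setInt("ipc.client.connect.max.retries.on.sasl", 10);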
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java Mon Jul  7 20:43:56 2014
@@ -30,6 +30,7 @@ import javax.management.ObjectName;
 import javax.management.ReflectionException;
 
 import static com.google.common.base.Preconditions.*;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -226,7 +227,13 @@ class MetricsSourceAdapter implements Dy
       mbeanName = null;
     }
   }
+  
+  @VisibleForTesting
+  ObjectName getMBeanName() {
+    return mbeanName;
+  }
 
+  
   private void updateInfoCache() {
     LOG.debug("Updating info cache...");
     infoCache = infoBuilder.reset(lastRecs).get();

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java Mon Jul  7 20:43:56 2014
@@ -32,6 +32,7 @@ import javax.management.ObjectName;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.annotations.VisibleForTesting;
 import java.util.Locale;
 import static com.google.common.base.Preconditions.*;
 
@@ -573,6 +574,11 @@ public class MetricsSystemImpl extends M
     return allSources.get(name);
   }
 
+  @VisibleForTesting
+  MetricsSourceAdapter getSourceAdapter(String name) {
+    return sources.get(name);
+  }
+
   private InitMode initMode() {
     LOG.debug("from system property: "+ System.getProperty(MS_INIT_MODE_KEY));
     LOG.debug("from environment variable: "+ System.getenv(MS_INIT_MODE_KEY));

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java Mon Jul  7 20:43:56 2014
@@ -111,6 +111,11 @@ public enum DefaultMetricsSystem {
   }
 
   @InterfaceAudience.Private
+  public static void removeMBeanName(ObjectName name) {
+    INSTANCE.removeObjectName(name.toString());
+  }
+
+  @InterfaceAudience.Private
   public static String sourceName(String name, boolean dupOK) {
     return INSTANCE.newSourceName(name, dupOK);
   }
@@ -126,6 +131,10 @@ public enum DefaultMetricsSystem {
     }
   }
 
+  synchronized void removeObjectName(String name) {
+    mBeanNames.map.remove(name);
+  }
+
   synchronized String newSourceName(String name, boolean dupOK) {
     if (sourceNames.map.containsKey(name)) {
       if (dupOK) {

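removeObjectName is the missing counterpart to the name-tracking map: without it, an MBean name stayed recorded in DefaultMetricsSystem forever, so a source could not later be re-registered under its original MBean name. Together with the MBeans.unregister change below, the full cycle now works; a sketch with hypothetical service/source names:

    ObjectName name = MBeans.register("MyService", "MySource", mbean);
    MBeans.unregister(name);   // now also calls removeMBeanName(name)
    // re-registering can reuse the original name instead of a
    // uniquified variant
    ObjectName again = MBeans.register("MyService", "MySource", mbean);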
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java Mon Jul  7 20:43:56 2014
@@ -50,10 +50,6 @@ public class GraphiteSink implements Met
     private String metricsPrefix = null;
     private Socket socket = null;
 
-    public void setWriter(Writer writer) {
-        this.writer = writer;
-    }
-
     @Override
     public void init(SubsetConfiguration conf) {
         // Get Graphite host configurations.
@@ -68,7 +64,7 @@ public class GraphiteSink implements Met
         try {
             // Open an connection to Graphite server.
             socket = new Socket(serverHost, serverPort);
-            setWriter(new OutputStreamWriter(socket.getOutputStream()));
+            writer = new OutputStreamWriter(socket.getOutputStream());
         } catch (Exception e) {
             throw new MetricsException("Error creating connection, "
                     + serverHost + ":" + serverPort, e);

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java Mon Jul  7 20:43:56 2014
@@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.lib.De
 import org.apache.hadoop.metrics2.lib.Interns;
 import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*;
 import static org.apache.hadoop.metrics2.impl.MsInfo.*;
+import org.apache.hadoop.util.JvmPauseMonitor;
 
 /**
  * JVM and logging related metrics.
@@ -65,6 +66,7 @@ public class JvmMetrics implements Metri
       ManagementFactory.getGarbageCollectorMXBeans();
   final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
   final String processName, sessionId;
+  private JvmPauseMonitor pauseMonitor = null;
   final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
       new ConcurrentHashMap<String, MetricsInfo[]>();
 
@@ -73,6 +75,10 @@ public class JvmMetrics implements Metri
     this.sessionId = sessionId;
   }
 
+  public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) {
+    this.pauseMonitor = pauseMonitor;
+  }
+
   public static JvmMetrics create(String processName, String sessionId,
                                   MetricsSystem ms) {
     return ms.register(JvmMetrics.name(), JvmMetrics.description(),
@@ -120,6 +126,15 @@ public class JvmMetrics implements Metri
     }
     rb.addCounter(GcCount, count)
       .addCounter(GcTimeMillis, timeMillis);
+    
+    if (pauseMonitor != null) {
+      rb.addCounter(GcNumWarnThresholdExceeded,
+          pauseMonitor.getNumGcWarnThreadholdExceeded());
+      rb.addCounter(GcNumInfoThresholdExceeded,
+          pauseMonitor.getNumGcInfoThresholdExceeded());
+      rb.addCounter(GcTotalExtraSleepTime,
+          pauseMonitor.getTotalGcExtraSleepTime());
+    }
   }
 
   private MetricsInfo[] getGcInfo(String gcName) {

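JvmMetrics now reports the GC pause counters (added to JvmMetricsInfo below) when a daemon wires in a JvmPauseMonitor. A hedged wiring sketch, assuming the JvmPauseMonitor API of this era (a Configuration-taking constructor plus start()); the process and session names are illustrative:

    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);  // assumed ctor
    pauseMonitor.start();
    JvmMetrics jvmMetrics =
        JvmMetrics.create("NameNode", "session", DefaultMetricsSystem.instance());
    jvmMetrics.setPauseMonitor(pauseMonitor);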
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java Mon Jul  7 20:43:56 2014
@@ -48,7 +48,10 @@ public enum JvmMetricsInfo implements Me
   LogFatal("Total number of fatal log events"),
   LogError("Total number of error log events"),
   LogWarn("Total number of warning log events"),
-  LogInfo("Total number of info log events");
+  LogInfo("Total number of info log events"),
+  GcNumWarnThresholdExceeded("Number of times that the GC warn threshold is exceeded"),
+  GcNumInfoThresholdExceeded("Number of times that the GC info threshold is exceeded"),
+  GcTotalExtraSleepTime("Total GC extra sleep time in milliseconds");
 
   private final String desc;
 

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java Mon Jul  7 20:43:56 2014
@@ -84,6 +84,7 @@ public class MBeans {
     } catch (Exception e) {
       LOG.warn("Error unregistering "+ mbeanName, e);
     }
+    DefaultMetricsSystem.removeMBeanName(mbeanName);
   }
 
   static private ObjectName getMBeanName(String serviceName, String nameName) {

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java Mon Jul  7 20:43:56 2014
@@ -22,6 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * An interface for the implementation of a user-to-groups mapping service
@@ -30,6 +31,7 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public interface GroupMappingServiceProvider {
+  public static final String GROUP_MAPPING_CONFIG_PREFIX = CommonConfigurationKeysPublic.HADOOP_SECURITY_GROUP_MAPPING;
   
   /**
    * Get all various group memberships of a given user.

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java Mon Jul  7 20:43:56 2014
@@ -20,22 +20,21 @@ package org.apache.hadoop.security.autho
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.Arrays;
-import java.util.List;
+import java.util.Collection;
+import java.util.HashSet;
 import java.util.LinkedList;
-import java.util.ListIterator;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Class representing a configured access control list.
@@ -58,9 +57,9 @@ public class AccessControlList implement
   private static final int INITIAL_CAPACITY = 256;
 
   // Set of users who are granted access.
-  private Set<String> users;
+  private Collection<String> users;
   // Set of groups which are granted access
-  private Set<String> groups;
+  private Collection<String> groups;
   // Whether all users are granted access.
   private boolean allAllowed;
 
@@ -82,37 +81,44 @@ public class AccessControlList implement
    * @param aclString String representation of the ACL
    */
   public AccessControlList(String aclString) {
-    buildACL(aclString);
+    buildACL(aclString.split(" ", 2));
+  }
+  
+  /**
+   * Construct a new ACL from String representation of users and groups
+   * 
+   * The arguments are comma separated lists
+   * 
+   * @param users comma separated list of users
+   * @param groups comma separated list of groups
+   */
+  public AccessControlList(String users, String groups) {
+    buildACL(new String[] {users, groups});
   }
 
   /**
-   * Build ACL from the given string, format of the string is
-   * user1,...,userN group1,...,groupN
+   * Build ACL from the given two Strings.
+   * The Strings contain comma separated values.
    *
-   * @param aclString build ACL from this string
+   * @param userGroupStrings array of users and groups, each a comma separated String
    */
-  private void buildACL(String aclString) {
-    users = new TreeSet<String>();
-    groups = new TreeSet<String>();
-    if (isWildCardACLValue(aclString)) {
-      allAllowed = true;
-    } else {
-      allAllowed = false;
-      String[] userGroupStrings = aclString.split(" ", 2);
-
-      if (userGroupStrings.length >= 1) {
-        List<String> usersList = new LinkedList<String>(
-          Arrays.asList(userGroupStrings[0].split(",")));
-        cleanupList(usersList);
-        addToSet(users, usersList);
+  private void buildACL(String[] userGroupStrings) {
+    users = new HashSet<String>();
+    groups = new HashSet<String>();
+    for (String aclPart : userGroupStrings) {
+      if (aclPart != null && isWildCardACLValue(aclPart)) {
+        allAllowed = true;
+        break;
       }
+    }
+    if (!allAllowed) {      
+      if (userGroupStrings.length >= 1 && userGroupStrings[0] != null) {
+        users = StringUtils.getTrimmedStringCollection(userGroupStrings[0]);
+      } 
       
-      if (userGroupStrings.length == 2) {
-        List<String> groupsList = new LinkedList<String>(
-          Arrays.asList(userGroupStrings[1].split(",")));
-        cleanupList(groupsList);
-        addToSet(groups, groupsList);
-        groupsMapping.cacheGroupsAdd(groupsList);
+      if (userGroupStrings.length == 2 && userGroupStrings[1] != null) {
+        groups = StringUtils.getTrimmedStringCollection(userGroupStrings[1]);
+        groupsMapping.cacheGroupsAdd(new LinkedList<String>(groups));
       }
     }
   }
@@ -203,7 +209,7 @@ public class AccessControlList implement
    * Get the names of users allowed for this service.
    * @return the set of user names. the set must not be modified.
    */
-  Set<String> getUsers() {
+  Collection<String> getUsers() {
     return users;
   }
   
@@ -211,7 +217,7 @@ public class AccessControlList implement
    * Get the names of user groups allowed for this service.
    * @return the set of group names. the set must not be modified.
    */
-  Set<String> getGroups() {
+  Collection<String> getGroups() {
     return groups;
   }
 
@@ -229,36 +235,6 @@ public class AccessControlList implement
   }
 
   /**
-   * Cleanup list, remove empty strings, trim leading/trailing spaces
-   *
-   * @param list clean this list
-   */
-  private static final void cleanupList(List<String> list) {
-    ListIterator<String> i = list.listIterator();
-    while(i.hasNext()) {
-      String s = i.next();
-      if(s.length() == 0) {
-        i.remove();
-      } else {
-        s = s.trim();
-        i.set(s);
-      }
-    }
-  }
-
-  /**
-   * Add list to a set
-   *
-   * @param set add list to this set
-   * @param list add items of this list to the set
-   */
-  private static final void addToSet(Set<String> set, List<String> list) {
-    for(String s : list) {
-      set.add(s);
-    }
-  }
-
-  /**
    * Returns descriptive way of users and groups that are part of this ACL.
    * Use {@link #getAclString()} to get the exact String that can be given to
    * the constructor of AccessControlList to create a new instance.
@@ -331,7 +307,7 @@ public class AccessControlList implement
   @Override
   public void readFields(DataInput in) throws IOException {
     String aclString = Text.readString(in);
-    buildACL(aclString);
+    buildACL(aclString.split(" ", 2));
   }
 
   /**
@@ -358,7 +334,7 @@ public class AccessControlList implement
    *
    * @param strings set of strings to concatenate
    */
-  private String getString(Set<String> strings) {
+  private String getString(Collection<String> strings) {
     StringBuilder sb = new StringBuilder(INITIAL_CAPACITY);
     boolean first = true;
     for(String str: strings) {

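Besides storing users and groups as the trimmed collections returned by StringUtils.getTrimmedStringCollection, the patch adds a two-argument constructor so callers with separate user and group lists no longer need to join them into one space-separated string. Both forms below build the same ACL (the names are illustrative):

    AccessControlList a = new AccessControlList("alice,bob mapred,hdfs");
    AccessControlList b = new AccessControlList("alice,bob", "mapred,hdfs");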
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java Mon Jul  7 20:43:56 2014
@@ -18,18 +18,15 @@
 
 package org.apache.hadoop.security.authorize;
 
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.MachineList;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -39,44 +36,39 @@ public class DefaultImpersonationProvide
   private static final String CONF_GROUPS = ".groups";
   private static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser.";
   private static final String CONF_HADOOP_PROXYUSER_RE = "hadoop\\.proxyuser\\.";
-  // list of users, groups and hosts per proxyuser
-  private Map<String, Collection<String>> proxyUsers = 
-    new HashMap<String, Collection<String>>(); 
-  private Map<String, Collection<String>> proxyGroups = 
-    new HashMap<String, Collection<String>>();
-  private Map<String, Collection<String>> proxyHosts = 
-    new HashMap<String, Collection<String>>();
+  private static final String CONF_HADOOP_PROXYUSER_RE_USERS_GROUPS = 
+      CONF_HADOOP_PROXYUSER_RE+"[^.]*(" + Pattern.quote(CONF_USERS) +
+      "|" + Pattern.quote(CONF_GROUPS) + ")";
+  private static final String CONF_HADOOP_PROXYUSER_RE_HOSTS = 
+      CONF_HADOOP_PROXYUSER_RE+"[^.]*"+ Pattern.quote(CONF_HOSTS);
+  // acl and list of hosts per proxyuser
+  private Map<String, AccessControlList> proxyUserAcl = 
+    new HashMap<String, AccessControlList>();
+  private Map<String, MachineList> proxyHosts = 
+    new HashMap<String, MachineList>();
   private Configuration conf;
 
   @Override
   public void setConf(Configuration conf) {
     this.conf = conf;
 
-    // get all the new keys for users
-    String regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_USERS;
-    Map<String,String> allMatchKeys = conf.getValByRegex(regex);
+    // get list of users and groups per proxyuser
+    Map<String,String> allMatchKeys = 
+        conf.getValByRegex(CONF_HADOOP_PROXYUSER_RE_USERS_GROUPS); 
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {  
-      Collection<String> users = StringUtils.getTrimmedStringCollection(entry.getValue());
-      proxyUsers.put(entry.getKey(), users);
-    }
-
-    // get all the new keys for groups
-    regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_GROUPS;
-    allMatchKeys = conf.getValByRegex(regex);
-    for(Entry<String, String> entry : allMatchKeys.entrySet()) {
-      Collection<String> groups = StringUtils.getTrimmedStringCollection(entry.getValue());
-      proxyGroups.put(entry.getKey(), groups);
-      //cache the groups. This is needed for NetGroups
-      Groups.getUserToGroupsMappingService(conf).cacheGroupsAdd(
-          new ArrayList<String>(groups));
+      String aclKey = getAclKey(entry.getKey());
+      if (!proxyUserAcl.containsKey(aclKey)) {
+        proxyUserAcl.put(aclKey, new AccessControlList(
+            allMatchKeys.get(aclKey + CONF_USERS) ,
+            allMatchKeys.get(aclKey + CONF_GROUPS)));
+      }
     }
 
-    // now hosts
-    regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_HOSTS;
-    allMatchKeys = conf.getValByRegex(regex);
+    // get hosts per proxyuser
+    allMatchKeys = conf.getValByRegex(CONF_HADOOP_PROXYUSER_RE_HOSTS);
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {
       proxyHosts.put(entry.getKey(),
-          StringUtils.getTrimmedStringCollection(entry.getValue()));
+          new MachineList(entry.getValue()));
     }
   }
 
@@ -88,79 +80,34 @@ public class DefaultImpersonationProvide
   @Override
   public void authorize(UserGroupInformation user, 
       String remoteAddress) throws AuthorizationException {
-
-    if (user.getRealUser() == null) {
+    
+    UserGroupInformation realUser = user.getRealUser();
+    if (realUser == null) {
       return;
     }
-    boolean userAuthorized = false;
-    boolean ipAuthorized = false;
-    UserGroupInformation superUser = user.getRealUser();
-
-    Collection<String> allowedUsers = proxyUsers.get(
-        getProxySuperuserUserConfKey(superUser.getShortUserName()));
-
-    if (isWildcardList(allowedUsers)) {
-      userAuthorized = true;
-    } else if (allowedUsers != null && !allowedUsers.isEmpty()) {
-      if (allowedUsers.contains(user.getShortUserName())) {
-        userAuthorized = true;
-      }
-    }
-
-    if (!userAuthorized){
-      Collection<String> allowedUserGroups = proxyGroups.get(
-          getProxySuperuserGroupConfKey(superUser.getShortUserName()));
-
-      if (isWildcardList(allowedUserGroups)) {
-        userAuthorized = true;
-      } else if (allowedUserGroups != null && !allowedUserGroups.isEmpty()) {
-        for (String group : user.getGroupNames()) {
-          if (allowedUserGroups.contains(group)) {
-            userAuthorized = true;
-            break;
-          }
-        }
-      }
-
-      if (!userAuthorized) {
-        throw new AuthorizationException("User: " + superUser.getUserName()
-            + " is not allowed to impersonate " + user.getUserName());
-      }
+    
+    AccessControlList acl = proxyUserAcl.get(
+        CONF_HADOOP_PROXYUSER+realUser.getShortUserName());
+    if (acl == null || !acl.isUserAllowed(user)) {
+      throw new AuthorizationException("User: " + realUser.getUserName()
+          + " is not allowed to impersonate " + user.getUserName());
     }
 
-    Collection<String> ipList = proxyHosts.get(
-        getProxySuperuserIpConfKey(superUser.getShortUserName()));
+    MachineList machineList = proxyHosts.get(
+        getProxySuperuserIpConfKey(realUser.getShortUserName()));
 
-    if (isWildcardList(ipList)) {
-      ipAuthorized = true;
-    } else if (ipList != null && !ipList.isEmpty()) {
-      for (String allowedHost : ipList) {
-        InetAddress hostAddr;
-        try {
-          hostAddr = InetAddress.getByName(allowedHost);
-        } catch (UnknownHostException e) {
-          continue;
-        }
-        if (hostAddr.getHostAddress().equals(remoteAddress)) {
-          // Authorization is successful
-          ipAuthorized = true;
-        }
-      }
-    }
-    if(!ipAuthorized) {
+    if (machineList == null || !machineList.includes(remoteAddress)) {
       throw new AuthorizationException("Unauthorized connection for super-user: "
-          + superUser.getUserName() + " from IP " + remoteAddress);
+          + realUser.getUserName() + " from IP " + remoteAddress);
     }
   }
-
-  /**
-   * Return true if the configuration specifies the special configuration value
-   * "*", indicating that any group or host list is allowed to use this configuration.
-   */
-  private boolean isWildcardList(Collection<String> list) {
-    return (list != null) &&
-    (list.size() == 1) &&
-    (list.contains("*"));
+  
+  private String getAclKey(String key) {
+    int endIndex = key.lastIndexOf(".");
+    if (endIndex != -1) {
+      return key.substring(0, endIndex); 
+    }
+    return key;
   }
   
   /**
@@ -194,17 +141,22 @@ public class DefaultImpersonationProvide
   }
 
   @VisibleForTesting
-  public Map<String, Collection<String>> getProxyUsers() {
-    return proxyUsers;
-  }
-
-  @VisibleForTesting
   public Map<String, Collection<String>> getProxyGroups() {
-    return proxyGroups;
+     Map<String,Collection<String>> proxyGroups = new HashMap<String,Collection<String>>();
+     for(Entry<String, AccessControlList> entry : proxyUserAcl.entrySet()) {
+       proxyGroups.put(entry.getKey() + CONF_GROUPS, entry.getValue().getGroups());
+     }
+     return proxyGroups;
   }
 
   @VisibleForTesting
   public Map<String, Collection<String>> getProxyHosts() {
-    return proxyHosts;
+    Map<String, Collection<String>> tmpProxyHosts = 
+        new HashMap<String, Collection<String>>();
+    for (Map.Entry<String, MachineList> proxyHostEntry :proxyHosts.entrySet()) {
+      tmpProxyHosts.put(proxyHostEntry.getKey(), 
+          proxyHostEntry.getValue().getCollection());
+    }
+    return tmpProxyHosts;
   }
 }

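For context, MachineList (which replaces the hand-rolled host matching removed
above) accepts host names, IP addresses and CIDR ranges in one comma-separated
string; a small sketch with made-up addresses:

  import org.apache.hadoop.util.MachineList;

  MachineList hosts = new MachineList("10.222.0.0/16,10.113.221.221");
  hosts.includes("10.222.3.4");        // true: inside the 10.222.0.0/16 range
  hosts.includes("10.113.221.221");    // true: exact address match
  hosts.includes("192.168.1.1");       // false: not listed
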
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java Mon Jul  7 20:43:56 2014
@@ -131,6 +131,10 @@ public class ServiceAuthorizationManager
       PolicyProvider provider) {
     final Map<Class<?>, AccessControlList> newAcls =
         new IdentityHashMap<Class<?>, AccessControlList>();
+    
+    String defaultAcl = conf.get(
+        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
+        AccessControlList.WILDCARD_ACL_VALUE);
 
     // Parse the config file
     Service[] services = provider.getServices();
@@ -139,7 +143,7 @@ public class ServiceAuthorizationManager
         AccessControlList acl =
             new AccessControlList(
                 conf.get(service.getServiceKey(),
-                    AccessControlList.WILDCARD_ACL_VALUE)
+                    defaultAcl)
             );
         newAcls.put(service.getProtocol(), acl);
       }

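The effect of the new default is a two-level fallback; a hedged sketch using
the key names documented later in this commit (the service key shown is one
example from the ServiceLevelAuth table):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.authorize.AccessControlList;

  Configuration conf = new Configuration();
  // cluster-wide default, itself falling back to the wildcard "*"
  String defaultAcl = conf.get("security.service.authorization.default.acl",
      AccessControlList.WILDCARD_ACL_VALUE);
  // each per-service key now falls back to defaultAcl instead of "*"
  AccessControlList acl = new AccessControlList(
      conf.get("security.ha.service.protocol.acl", defaultAcl));
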
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java Mon Jul  7 20:43:56 2014
@@ -22,6 +22,7 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -29,7 +30,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.ChecksumException;
 
 /**
- * This class provides inteface and utilities for processing checksums for
+ * This class provides interface and utilities for processing checksums for
  * DFS data transfers.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@@ -72,6 +73,13 @@ public class DataChecksum implements Che
     }
   }
 
+  /**
+   * Create a Crc32 Checksum object. The implementation of the Crc32 algorithm
+   * is chosen depending on the platform.
+   */
+  public static Checksum newCrc32() {
+    return Shell.isJava7OrAbove() ? new CRC32() : new PureJavaCrc32();
+  }
 
   public static DataChecksum newDataChecksum(Type type, int bytesPerChecksum ) {
     if ( bytesPerChecksum <= 0 ) {
@@ -82,7 +90,7 @@ public class DataChecksum implements Che
     case NULL :
       return new DataChecksum(type, new ChecksumNull(), bytesPerChecksum );
     case CRC32 :
-      return new DataChecksum(type, new PureJavaCrc32(), bytesPerChecksum );
+      return new DataChecksum(type, newCrc32(), bytesPerChecksum );
     case CRC32C:
       return new DataChecksum(type, new PureJavaCrc32C(), bytesPerChecksum);
     default:

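Callers are unaffected by the switch, since both implementations are exposed
through java.util.zip.Checksum; a short usage sketch:

  import java.util.zip.Checksum;
  import org.apache.hadoop.util.DataChecksum;

  byte[] data = "example data".getBytes();
  // java.util.zip.CRC32 on Java 7 and above, PureJavaCrc32 otherwise
  Checksum crc = DataChecksum.newCrc32();
  crc.update(data, 0, data.length);
  long checksum = crc.getValue();
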
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java Mon Jul  7 20:43:56 2014
@@ -62,10 +62,13 @@ public class JvmPauseMonitor {
       "jvm.pause.info-threshold.ms";
   private static final long INFO_THRESHOLD_DEFAULT = 1000;
 
-  
+  private long numGcWarnThresholdExceeded = 0;
+  private long numGcInfoThresholdExceeded = 0;
+  private long totalGcExtraSleepTime = 0;
+   
   private Thread monitorThread;
   private volatile boolean shouldRun = true;
-  
+
   public JvmPauseMonitor(Configuration conf) {
     this.warnThresholdMs = conf.getLong(WARN_THRESHOLD_KEY, WARN_THRESHOLD_DEFAULT);
     this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT);
@@ -87,6 +90,22 @@ public class JvmPauseMonitor {
       Thread.currentThread().interrupt();
     }
   }
+
+  public boolean isStarted() {
+    return monitorThread != null;
+  }
+  
+  public long getNumGcWarnThresholdExceeded() {
+    return numGcWarnThresholdExceeded;
+  }
+  
+  public long getNumGcInfoThresholdExceeded() {
+    return numGcInfoThresholdExceeded;
+  }
+  
+  public long getTotalGcExtraSleepTime() {
+    return totalGcExtraSleepTime;
+  }
   
   private String formatMessage(long extraSleepTime,
       Map<String, GcTimes> gcTimesAfterSleep,
@@ -166,13 +185,15 @@ public class JvmPauseMonitor {
         Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
 
         if (extraSleepTime > warnThresholdMs) {
+          ++numGcWarnThresholdExceeded;
           LOG.warn(formatMessage(
               extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
         } else if (extraSleepTime > infoThresholdMs) {
+          ++numGcInfoThresholdExceeded;
           LOG.info(formatMessage(
               extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
         }
-        
+        totalGcExtraSleepTime += extraSleepTime;
         gcTimesBeforeSleep = gcTimesAfterSleep;
       }
     }

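A sketch of how the new counters might be read (start() and stop() are assumed
from the existing class; the getters are the ones added above):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.util.JvmPauseMonitor;

  JvmPauseMonitor monitor = new JvmPauseMonitor(new Configuration());
  monitor.start();
  // ... later, e.g. when publishing JVM metrics ...
  long warns = monitor.getNumGcWarnThresholdExceeded();
  long infos = monitor.getNumGcInfoThresholdExceeded();
  long extraMs = monitor.getTotalGcExtraSleepTime();
  monitor.stop();
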
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java Mon Jul  7 20:43:56 2014
@@ -57,38 +57,31 @@ public class PureJavaCrc32 implements Ch
   }
 
   @Override
-  public void update(byte[] b, int off, int len) {
+  public void update(final byte[] b, final int offset, final int len) {
     int localCrc = crc;
 
-    while(len > 7) {
-      final int c0 =(b[off+0] ^ localCrc) & 0xff;
-      final int c1 =(b[off+1] ^ (localCrc >>>= 8)) & 0xff;
-      final int c2 =(b[off+2] ^ (localCrc >>>= 8)) & 0xff;
-      final int c3 =(b[off+3] ^ (localCrc >>>= 8)) & 0xff;
-      localCrc = (T[T8_7_start + c0] ^ T[T8_6_start + c1])
-          ^ (T[T8_5_start + c2] ^ T[T8_4_start + c3]);
+    final int remainder = len & 0x7;
+    int i = offset;
+    for(final int end = offset + len - remainder; i < end; i += 8) {
+      final int x = localCrc
+          ^ ((((b[i  ] << 24) >>> 24) + ((b[i+1] << 24) >>> 16))
+           + (((b[i+2] << 24) >>> 8 ) +  (b[i+3] << 24)));
 
-      final int c4 = b[off+4] & 0xff;
-      final int c5 = b[off+5] & 0xff;
-      final int c6 = b[off+6] & 0xff;
-      final int c7 = b[off+7] & 0xff;
-
-      localCrc ^= (T[T8_3_start + c4] ^ T[T8_2_start + c5])
-           ^ (T[T8_1_start + c6] ^ T[T8_0_start + c7]);
-
-      off += 8;
-      len -= 8;
+      localCrc = ((T[((x << 24) >>> 24) + 0x700] ^ T[((x << 16) >>> 24) + 0x600])
+                ^ (T[((x <<  8) >>> 24) + 0x500] ^ T[ (x        >>> 24) + 0x400]))
+               ^ ((T[((b[i+4] << 24) >>> 24) + 0x300] ^ T[((b[i+5] << 24) >>> 24) + 0x200])
+                ^ (T[((b[i+6] << 24) >>> 24) + 0x100] ^ T[((b[i+7] << 24) >>> 24)]));
     }
 
     /* loop unroll - duff's device style */
-    switch(len) {
-      case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
-      case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
-      case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
-      case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
-      case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
-      case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
-      case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
+    switch(remainder) {
+      case 7: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
+      case 6: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
+      case 5: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
+      case 4: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
+      case 3: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
+      case 2: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
+      case 1: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24];
       default:
         /* nothing */
     }
@@ -99,24 +92,15 @@ public class PureJavaCrc32 implements Ch
 
   @Override
   final public void update(int b) {
-    crc = (crc >>> 8) ^ T[T8_0_start + ((crc ^ b) & 0xff)];
+    crc = (crc >>> 8) ^ T[(((crc ^ b) << 24) >>> 24)];
   }
 
   /*
    * CRC-32 lookup tables generated by the polynomial 0xEDB88320.
    * See also TestPureJavaCrc32.Table.
    */
-  private static final int T8_0_start = 0*256;
-  private static final int T8_1_start = 1*256;
-  private static final int T8_2_start = 2*256;
-  private static final int T8_3_start = 3*256;
-  private static final int T8_4_start = 4*256;
-  private static final int T8_5_start = 5*256;
-  private static final int T8_6_start = 6*256;
-  private static final int T8_7_start = 7*256;
-
   private static final int[] T = new int[] {
-  	/* T8_0 */
+    /* T8_0 */
     0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 
     0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 
     0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 

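The shift pairs above replace the old masked table lookups: for any int v,
((v << 24) >>> 24) zero-extends the low byte exactly like (v & 0xff), and the
added offsets 0x100..0x700 stand in for the removed T8_1_start..T8_7_start
constants within the single flattened table T. A self-contained check:

  public class ShiftMaskDemo {
    public static void main(String[] args) {
      int x = 0xCAFEBABE;
      System.out.println(((x << 24) >>> 24) == (x & 0xff));           // true
      System.out.println(((x << 16) >>> 24) == ((x >>> 8) & 0xff));   // true
      System.out.println(((x <<  8) >>> 24) == ((x >>> 16) & 0xff));  // true
      System.out.println((x >>> 24) == ((x >>> 24) & 0xff));          // true
    }
  }
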
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java Mon Jul  7 20:43:56 2014
@@ -126,6 +126,9 @@ abstract public class Shell {
   public static final boolean LINUX   = (osType == OSType.OS_TYPE_LINUX);
   public static final boolean OTHER   = (osType == OSType.OS_TYPE_OTHER);
 
+  public static final boolean PPC_64
+                = System.getProperty("os.arch").contains("ppc64");
+
   /** a Unix command to get the current user's groups list */
   public static String[] getGroupsCommand() {
     return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
@@ -618,7 +621,7 @@ abstract public class Shell {
    * This is an IOException with exit code added.
    */
   public static class ExitCodeException extends IOException {
-    int exitCode;
+    private final int exitCode;
     
     public ExitCodeException(int exitCode, String message) {
       super(message);
@@ -628,6 +631,16 @@ abstract public class Shell {
     public int getExitCode() {
       return exitCode;
     }
+
+    @Override
+    public String toString() {
+      final StringBuilder sb =
+          new StringBuilder("ExitCodeException ");
+      sb.append("exitCode=").append(exitCode)
+        .append(": ");
+      sb.append(super.getMessage());
+      return sb.toString();
+    }
   }
   
   /**

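With the toString() override above, a failed command now reports its exit code
alongside the message; a hedged sketch (Shell.execCommand is an existing helper
in this class, the path is made up):

  import java.io.IOException;
  import org.apache.hadoop.util.Shell;

  try {
    Shell.execCommand("ls", "/no/such/path");
  } catch (Shell.ExitCodeException e) {
    System.err.println(e);            // e.g. "ExitCodeException exitCode=2: ls: ..."
    int code = e.getExitCode();
  } catch (IOException e) {
    // other shell failures
  }
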
Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Mon Jul  7 20:43:56 2014
@@ -94,6 +94,98 @@
   </description>
 </property>
 
+<!-- 
+=== Multiple group mapping providers configuration sample === 
+  This sample illustrates a typical use case for CompositeGroupsMapping where
+Hadoop authentication uses MIT Kerberos, which trusts an AD realm. In this case,
+service principals such as hdfs, mapred, hbase, hive and oozie can be placed in
+MIT Kerberos, while end users come only from the trusted AD. For the service
+principals, the ShellBasedUnixGroupsMapping provider can be used to query their
+groups efficiently, and for end users the LdapGroupsMapping provider can be used.
+This avoids having to add group entries in AD for the service principals when
+only the LdapGroupsMapping provider is used.
+  If multiple ADs are involved and trusted by MIT Kerberos, the LdapGroupsMapping
+provider can be configured multiple times with different AD-specific settings.
+This sample also shows how to do that. Here are the necessary configurations.
+
+<property>
+  <name>hadoop.security.group.mapping</name>
+  <value>org.apache.hadoop.security.CompositeGroupsMapping</value>
+  <description>
+    Class for user-to-group mapping (gets groups for a given user) for ACLs, which 
+    makes use of multiple other providers to provide the service.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.providers</name>
+  <value>shell4services,ad4usersX,ad4usersY</value>
+  <description>
+    Comma-separated list of names of other providers that provide the user-to-group mapping. 
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.providers.combined</name>
+  <value>true</value>
+  <description>
+    Whether groups from the providers are combined or not. The default value is true.
+    If true, all the providers are tried and the groups they return are combined into
+    the final result. Otherwise, providers are tried one by one in the configured list
+    order, and as soon as any provider returns groups, those groups are returned
+    without trying the remaining providers.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.provider.shell4services</name>
+  <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
+  <description>
+    Class for the group mapping provider named 'shell4services'. The name can then be 
+    referenced in the hadoop.security.group.mapping.providers property.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.provider.ad4usersX</name>
+  <value>org.apache.hadoop.security.LdapGroupsMapping</value>
+  <description>
+    Class for the group mapping provider named 'ad4usersX'. The name can then be 
+    referenced in the hadoop.security.group.mapping.providers property.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.provider.ad4usersY</name>
+  <value>org.apache.hadoop.security.LdapGroupsMapping</value>
+  <description>
+    Class for the group mapping provider named 'ad4usersY'. The name can then be 
+    referenced in the hadoop.security.group.mapping.providers property.
+  </description>
+</property>
+
+<property>
+<name>hadoop.security.group.mapping.provider.ad4usersX.ldap.url</name>
+<value>ldap://ad-host-for-users-X:389</value>
+  <description>
+    LDAP URL for the provider named 'ad4usersX'. Note this property is derived from 
+    'hadoop.security.group.mapping.ldap.url'.
+  </description>
+</property>
+
+<property>
+<name>hadoop.security.group.mapping.provider.ad4usersY.ldap.url</name>
+<value>ldap://ad-host-for-users-Y:389</value>
+  <description>
+    LDAP URL for the provider named 'ad4usersY'. Note this property is derived from 
+    'hadoop.security.group.mapping.ldap.url'.
+  </description>
+</property>
+
+You also need to configure other properties, such as
+  hadoop.security.group.mapping.ldap.bind.password.file,
+for the LDAP providers in the same way as shown above.
+
+-->
+ 
 <property>
   <name>hadoop.security.groups.cache.secs</name>
   <value>300</value>

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm Mon Jul  7 20:43:56 2014
@@ -86,6 +86,14 @@ jvm context
 *-------------------------------------+--------------------------------------+
 |<<<LogInfo>>> | Total number of INFO logs
 *-------------------------------------+--------------------------------------+
+|<<<GcNumWarnThresholdExceeded>>> | Number of times that the GC warn
+                                  | threshold is exceeded
+*-------------------------------------+--------------------------------------+
+|<<<GcNumInfoThresholdExceeded>>> | Number of times that the GC info
+                                  | threshold is exceeded
+*-------------------------------------+--------------------------------------+
+|<<<GcTotalExtraSleepTime>>> | Total GC extra sleep time in msec
+*-------------------------------------+--------------------------------------+
 
 rpc context
 

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm Mon Jul  7 20:43:56 2014
@@ -236,6 +236,25 @@ KVNO Timestamp         Principal
   </property>
 ----
 
+  The <<<hadoop.proxyuser.${superuser}.hosts>>> property accepts a list of IP
+  addresses, IP address ranges in CIDR format, and/or host names.
+  
+  For example, with the configuration below in core-site.xml, the user named
+  <<<oozie>>> accessing from hosts in the 10.222.0.0/16 range or from
+  10.113.221.221 can impersonate any user belonging to any group.
+  
+  ----
+  <property>
+    <name>hadoop.proxyuser.oozie.hosts</name>
+    <value>10.222.0.0/16,10.113.221.221</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.oozie.groups</name>
+    <value>*</value>
+  </property>
+----
+
 ** Secure DataNode
 
   Because the data transfer protocol of DataNode

Modified: hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm Mon Jul  7 20:43:56 2014
@@ -100,11 +100,15 @@ security.ha.service.protocol.acl      | 
    Example: <<<user1,user2 group1,group2>>>.
 
    Add a blank at the beginning of the line if only a list of groups is to
-   be provided, equivalently a comman-separated list of users followed by
+   be provided; equivalently, a comma-separated list of users followed by
    a space or nothing implies only a set of given users.
 
    A special value of <<<*>>> implies that all users are allowed to access the
-   service.
+   service. 
+   
+   If no access control list is defined for a service, the value of
+   <<<security.service.authorization.default.acl>>> is applied. If 
+   <<<security.service.authorization.default.acl>>> is not defined, <<<*>>> is applied.
 
 ** Refreshing Service Level Authorization Configuration
 

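For example, the default ACL described above can be set in hadoop-policy.xml
(the user and group names are hypothetical):

----
  <property>
    <name>security.service.authorization.default.acl</name>
    <value>hdfs,yarn admins</value>
  </property>
----
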

