hadoop-common-commits mailing list archives

From: whe...@apache.org
Subject: [02/10] hadoop git commit: HDFS-8979. Clean up checkstyle warnings in hadoop-hdfs-client module. Contributed by Mingliang Liu.
Date: Sat, 03 Oct 2015 18:38:53 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
index 38cf22b..fd5dbfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Time;
 
@@ -87,7 +86,7 @@ public class ShortCircuitReplica {
    * If non-null, the shared memory slot associated with this replica.
    */
   private final Slot slot;
-  
+
   /**
    * Current mmap state.
    *
@@ -171,14 +170,14 @@ public class ShortCircuitReplica {
       }
     }
   }
-  
+
   /**
    * Try to add a no-checksum anchor to our shared memory slot.
    *
    * It is only possible to add this anchor when the block is mlocked on the Datanode.
    * The DataNode will not munlock the block until the number of no-checksum anchors
    * for the block reaches zero.
-   * 
+   *
    * This method does not require any synchronization.
    *
    * @return     True if we successfully added a no-checksum anchor.
@@ -233,7 +232,7 @@ public class ShortCircuitReplica {
    */
   void close() {
     String suffix = "";
-    
+
     Preconditions.checkState(refCount == 0,
         "tried to close replica with refCount %d: %s", refCount, this);
     refCount = -1;
@@ -278,7 +277,7 @@ public class ShortCircuitReplica {
   MappedByteBuffer loadMmapInternal() {
     try {
       FileChannel channel = dataStream.getChannel();
-      MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0, 
+      MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0,
           Math.min(Integer.MAX_VALUE, channel.size()));
       LOG.trace("{}: created mmap of size {}", this, channel.size());
       return mmap;
@@ -325,13 +324,10 @@ public class ShortCircuitReplica {
    */
   @Override
   public String toString() {
-    return new StringBuilder().append("ShortCircuitReplica{").
-        append("key=").append(key).
-        append(", metaHeader.version=").append(metaHeader.getVersion()).
-        append(", metaHeader.checksum=").append(metaHeader.getChecksum()).
-        append(", ident=").append("0x").
-          append(Integer.toHexString(System.identityHashCode(this))).
-        append(", creationTimeMs=").append(creationTimeMs).
-        append("}").toString();
+    return "ShortCircuitReplica{" + "key=" + key
+        + ", metaHeader.version=" + metaHeader.getVersion()
+        + ", metaHeader.checksum=" + metaHeader.getChecksum()
+        + ", ident=" + "0x" + Integer.toHexString(System.identityHashCode(this))
+        + ", creationTimeMs=" + creationTimeMs + "}";
   }
 }
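
For readers wondering whether the toString() rewrite above changes behavior: it does not. javac compiles a single '+' concatenation expression into one StringBuilder chain, so the two forms produce identical strings and the concatenation form is simply easier to read. A minimal, self-contained sketch (hypothetical class, not part of the patch):

    public class ToStringStyles {
      private final long key = 42L;

      public String explicitBuilder() {
        return new StringBuilder().append("Replica{")
            .append("key=").append(key).append("}").toString();
      }

      public String concatenation() {
        return "Replica{" + "key=" + key + "}";
      }

      public static void main(String[] args) {
        ToStringStyles s = new ToStringStyles();
        // Both styles yield "Replica{key=42}".
        System.out.println(s.explicitBuilder().equals(s.concatenation())); // true
      }
    }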

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
index ef0019f..cb466c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 public final class ShortCircuitReplicaInfo {
   private final ShortCircuitReplica replica;
-  private final InvalidToken exc; 
+  private final InvalidToken exc;
 
   public ShortCircuitReplicaInfo() {
     this.replica = null;
@@ -43,9 +43,9 @@ public final class ShortCircuitReplicaInfo {
   }
 
   public InvalidToken getInvalidTokenException() {
-    return exc; 
+    return exc;
   }
-  
+
   public String toString() {
     StringBuilder builder = new StringBuilder();
     String prefix = "";
@@ -56,9 +56,8 @@ public final class ShortCircuitReplicaInfo {
     }
     if (exc != null) {
       builder.append(prefix).append(exc);
-      prefix = ", ";
     }
     builder.append("}");
     return builder.toString();
   }
-}
\ No newline at end of file
+}
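
The deleted `prefix = ", ";` in the hunk above is a classic dead store: nothing reads `prefix` after the final append, so the assignment can be removed without changing output. A small sketch of the pattern, with hypothetical names:

    public class DeadStoreDemo {
      static String render(boolean hasReplica, boolean hasExc) {
        StringBuilder builder = new StringBuilder("{");
        String prefix = "";
        if (hasReplica) {
          builder.append(prefix).append("replica");
          prefix = ", ";
        }
        if (hasExc) {
          builder.append(prefix).append("exc");
          // Reassigning 'prefix' here would be a dead store: no later read exists.
        }
        return builder.append("}").toString();
      }

      public static void main(String[] args) {
        System.out.println(render(true, true));   // {replica, exc}
        System.out.println(render(false, true));  // {exc}
      }
    }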

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
index fa40c15..fb0e06f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
@@ -43,11 +43,14 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.ComparisonChain;
 import com.google.common.primitives.Ints;
 
+import javax.annotation.Nonnull;
+
 /**
  * A shared memory segment used to implement short-circuit reads.
  */
 public class ShortCircuitShm {
-  private static final Logger LOG = LoggerFactory.getLogger(ShortCircuitShm.class);
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ShortCircuitShm.class);
 
   protected static final int BYTES_PER_SLOT = 64;
 
@@ -92,7 +95,7 @@ public class ShortCircuitShm {
 
     /**
      * Generate a random ShmId.
-     * 
+     *
      * We generate ShmIds randomly to prevent a malicious client from
      * successfully guessing one and using that to interfere with another
      * client.
@@ -105,11 +108,11 @@ public class ShortCircuitShm {
       this.hi = hi;
       this.lo = lo;
     }
-    
+
     public long getHi() {
       return hi;
     }
-    
+
     public long getLo() {
       return lo;
     }
@@ -140,13 +143,13 @@ public class ShortCircuitShm {
     }
 
     @Override
-    public int compareTo(ShmId other) {
+    public int compareTo(@Nonnull ShmId other) {
       return ComparisonChain.start().
           compare(hi, other.hi).
           compare(lo, other.lo).
           result();
     }
-  };
+  }
 
   /**
    * Uniquely identifies a slot.
@@ -154,7 +157,7 @@ public class ShortCircuitShm {
   public static class SlotId {
     private final ShmId shmId;
     private final int slotIdx;
-    
+
     public SlotId(ShmId shmId, int slotIdx) {
       this.shmId = shmId;
       this.slotIdx = slotIdx;
@@ -222,7 +225,7 @@ public class ShortCircuitShm {
           "doesn't support removal");
     }
   }
-  
+
   /**
    * A slot containing information about a replica.
    *
@@ -239,11 +242,11 @@ public class ShortCircuitShm {
    */
   public class Slot {
     /**
-     * Flag indicating that the slot is valid.  
-     * 
+     * Flag indicating that the slot is valid.
+     *
      * The DFSClient sets this flag when it allocates a new slot within one of
      * its shared memory regions.
-     * 
+     *
      * The DataNode clears this flag when the replica associated with this slot
      * is no longer valid.  The client itself also clears this flag when it
      * believes that the DataNode is no longer using this slot to communicate.
@@ -340,7 +343,7 @@ public class ShortCircuitShm {
       } while (!unsafe.compareAndSwapLong(null, this.slotAddress,
                   prev, prev & (~flag)));
     }
-    
+
     public boolean isValid() {
       return isSet(VALID_FLAG);
     }
@@ -367,11 +370,8 @@ public class ShortCircuitShm {
 
     public boolean isAnchored() {
       long prev = unsafe.getLongVolatile(null, this.slotAddress);
-      if ((prev & VALID_FLAG) == 0) {
-        // Slot is no longer valid.
-        return false;
-      }
-      return ((prev & 0x7fffffff) != 0);
+      // Slot is no longer valid.
+      return (prev & VALID_FLAG) != 0 && ((prev & 0x7fffffff) != 0);
     }
 
     /**
@@ -452,11 +452,11 @@ public class ShortCircuitShm {
 
   /**
    * Create the ShortCircuitShm.
-   * 
+   *
    * @param shmId       The ID to use.
-   * @param stream      The stream that we're going to use to create this 
+   * @param stream      The stream that we're going to use to create this
    *                    shared memory segment.
-   *                    
+   *
    *                    Although this is a FileInputStream, we are going to
    *                    assume that the underlying file descriptor is writable
    *                    as well as readable. It would be more appropriate to use
@@ -480,7 +480,7 @@ public class ShortCircuitShm {
     }
     this.shmId = shmId;
     this.mmappedLength = getUsableLength(stream);
-    this.baseAddress = POSIX.mmap(stream.getFD(), 
+    this.baseAddress = POSIX.mmap(stream.getFD(),
         POSIX.MMAP_PROT_READ | POSIX.MMAP_PROT_WRITE, true, mmappedLength);
     this.slots = new Slot[mmappedLength / BYTES_PER_SLOT];
     this.allocatedSlots = new BitSet(slots.length);
@@ -492,7 +492,7 @@ public class ShortCircuitShm {
   public final ShmId getShmId() {
     return shmId;
   }
-  
+
   /**
    * Determine if this shared memory object is empty.
    *
@@ -517,7 +517,7 @@ public class ShortCircuitShm {
    * @param slotIdx   Index of the slot.
    * @return          The base address of the slot.
    */
-  private final long calculateSlotAddress(int slotIdx) {
+  private long calculateSlotAddress(int slotIdx) {
     long offset = slotIdx;
     offset *= BYTES_PER_SLOT;
     return this.baseAddress + offset;
@@ -562,7 +562,7 @@ public class ShortCircuitShm {
    * Register a slot.
    *
    * This function looks at a slot which has already been initialized (by
-   * another process), and registers it with us.  Then, it returns the 
+   * another process), and registers it with us.  Then, it returns the
    * relevant Slot object.
    *
    * @return    The slot.
@@ -601,7 +601,7 @@ public class ShortCircuitShm {
 
   /**
    * Unregisters a slot.
-   * 
+   *
    * This doesn't alter the contents of the slot.  It just means
    *
    * @param slotIdx  Index of the slot to unregister.
@@ -613,11 +613,11 @@ public class ShortCircuitShm {
     slots[slotIdx] = null;
     LOG.trace("{}: unregisterSlot {}", this, slotIdx);
   }
-  
+
   /**
    * Iterate over all allocated slots.
-   * 
-   * Note that this method isn't safe if 
+   *
+   * Note that this method isn't safe if
    *
    * @return        The slot iterator.
    */
@@ -633,7 +633,7 @@ public class ShortCircuitShm {
     }
     LOG.trace(this + ": freed");
   }
-  
+
   @Override
   public String toString() {
     return this.getClass().getSimpleName() + "(" + shmId + ")";
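
The isAnchored() rewrite in the hunk above folds the early return into a single boolean expression; the two forms are equivalent for every bit pattern. A quick self-check, assuming VALID_FLAG occupies the top bit and the anchor count the low 31 bits (an assumption for illustration, not quoted from the patch):

    public class AnchorFlagDemo {
      // Assumption: VALID_FLAG is the sign bit, anchor count is the low 31 bits.
      private static final long VALID_FLAG = 1L << 63;

      static boolean isAnchoredOld(long word) {
        if ((word & VALID_FLAG) == 0) {
          return false; // slot is no longer valid
        }
        return (word & 0x7fffffff) != 0;
      }

      static boolean isAnchoredNew(long word) {
        return (word & VALID_FLAG) != 0 && (word & 0x7fffffff) != 0;
      }

      public static void main(String[] args) {
        long[] samples = {0L, 1L, VALID_FLAG, VALID_FLAG | 1, VALID_FLAG | 0x7fffffff};
        for (long w : samples) {
          if (isAnchoredOld(w) != isAnchoredNew(w)) {
            throw new AssertionError("mismatch at 0x" + Long.toHexString(w));
          }
        }
        System.out.println("old and new forms agree");
      }
    }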

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
index 31d4dcc..b0b19b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
@@ -46,4 +46,4 @@ public class ByteBufferOutputStream extends OutputStream {
   public void write(byte[] b, int off, int len) throws IOException {
     buf.put(b, off, len);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
index 17365fb..a0c59ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.base.Preconditions;
 
+import javax.annotation.Nonnull;
+
 /**
  * An InputStream implementations which reads from some other InputStream
  * but expects an exact number of bytes. Any attempts to read past the
@@ -42,10 +44,10 @@ public class ExactSizeInputStream extends FilterInputStream {
   /**
    * Construct an input stream that will read no more than
    * 'numBytes' bytes.
-   * 
+   *
    * If an EOF occurs on the underlying stream before numBytes
    * bytes have been read, an EOFException will be thrown.
-   * 
+   *
    * @param in the inputstream to wrap
    * @param numBytes the number of bytes to read
    */
@@ -80,7 +82,7 @@ public class ExactSizeInputStream extends FilterInputStream {
   }
 
   @Override
-  public int read(final byte[] b, final int off, int len)
+  public int read(@Nonnull final byte[] b, final int off, int len)
                   throws IOException {
     if (remaining <= 0) {
       return -1;
@@ -111,7 +113,7 @@ public class ExactSizeInputStream extends FilterInputStream {
     }
     return result;
   }
-  
+
   @Override
   public boolean markSupported() {
     return false;
@@ -121,5 +123,5 @@ public class ExactSizeInputStream extends FilterInputStream {
   public void mark(int readlimit) {
     throw new UnsupportedOperationException();
   }
-  
-}
\ No newline at end of file
+
+}
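
ExactSizeInputStream, touched above only for whitespace and a @Nonnull annotation, is easy to demonstrate: per its javadoc it caps reads at numBytes and throws EOFException if the wrapped stream ends early. A hedged usage sketch:

    import java.io.ByteArrayInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.util.ExactSizeInputStream;

    public class ExactSizeDemo {
      public static void main(String[] args) throws IOException {
        byte[] data = {1, 2, 3};
        // Wrapping a 3-byte source but demanding 4 bytes: the premature EOF
        // surfaces as an EOFException rather than a silent short read.
        try (ExactSizeInputStream in =
            new ExactSizeInputStream(new ByteArrayInputStream(data), 4)) {
          while (in.read() != -1) { /* consume */ }
        } catch (EOFException expected) {
          System.out.println("caught expected EOF: " + expected.getMessage());
        }
      }
    }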

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 9399d84..51ad08f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -38,7 +38,8 @@ public class LongBitFormat implements Serializable {
   /** Bit mask */
   private final long MASK;
 
-  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
+  public LongBitFormat(String name, LongBitFormat previous, int length,
+                       long min) {
     NAME = name;
     OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
     LENGTH = length;
@@ -64,7 +65,7 @@ public class LongBitFormat implements Serializable {
     }
     return (record & ~MASK) | (value << OFFSET);
   }
-  
+
   public long getMin() {
     return MIN;
   }
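
The hunk above only rewraps a constructor, but it shows LongBitFormat's core idea: successive fields occupy adjacent bit ranges of one long, each defined by an offset derived from the previous field, a length, and a mask; combine() writes a field with `(record & ~MASK) | (value << OFFSET)`. A minimal sketch of the same scheme with illustrative names (not Hadoop's class):

    public class BitFieldDemo {
      final int offset;
      final int length;
      final long mask;

      BitFieldDemo(BitFieldDemo previous, int length) {
        this.offset = previous == null ? 0 : previous.offset + previous.length;
        this.length = length;
        this.mask = (length == 64 ? -1L : (1L << length) - 1) << offset;
      }

      long retrieve(long record) {
        return (record & mask) >>> offset;
      }

      long combine(long value, long record) {
        return (record & ~mask) | (value << offset);
      }

      public static void main(String[] args) {
        BitFieldDemo low = new BitFieldDemo(null, 16);  // bits 0..15
        BitFieldDemo high = new BitFieldDemo(low, 16);  // bits 16..31
        long record = high.combine(7, low.combine(42, 0L));
        System.out.println(low.retrieve(record) + " " + high.retrieve(record)); // 42 7
      }
    }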

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 9153745..4977cc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -171,7 +171,7 @@ public class StripedBlockUtil {
   private static int lastCellSize(int size, int cellSize, int numDataBlocks,
       int i) {
     if (i < numDataBlocks) {
-      // parity block size (i.e. i >= numDataBlocks) is the same as 
+      // parity block size (i.e. i >= numDataBlocks) is the same as
       // the first data block size (i.e. i = 0).
       size -= i*cellSize;
       if (size < 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java
index 5ba3de0..28adccb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java
@@ -15,4 +15,4 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.util;
\ No newline at end of file
+package org.apache.hadoop.hdfs.util;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
index 911d6a4..4233147 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.fs.FSInputStream;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.net.HttpHeaders;
 
+import javax.annotation.Nonnull;
+
 /**
  * To support HTTP byte streams, a new connection to an HTTP server needs to be
  * created each time. This class hides the complexity of those multiple
@@ -101,24 +103,24 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   }
 
   protected abstract URL getResolvedUrl(final HttpURLConnection connection
-      ) throws IOException;
+  ) throws IOException;
 
   @VisibleForTesting
   protected InputStream getInputStream() throws IOException {
     switch (status) {
-      case NORMAL:
-        break;
-      case SEEK:
-        if (in != null) {
-          in.close();
-        }
-        InputStreamAndFileLength fin = openInputStream(startPos);
-        in = fin.in;
-        fileLength = fin.length;
-        status = StreamStatus.NORMAL;
-        break;
-      case CLOSED:
-        throw new IOException("Stream closed");
+    case NORMAL:
+      break;
+    case SEEK:
+      if (in != null) {
+        in.close();
+      }
+      InputStreamAndFileLength fin = openInputStream(startPos);
+      in = fin.in;
+      fileLength = fin.length;
+      status = StreamStatus.NORMAL;
+      break;
+    case CLOSED:
+      throw new IOException("Stream closed");
     }
     return in;
   }
@@ -199,7 +201,7 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   }
 
   @Override
-  public int read(byte b[], int off, int len) throws IOException {
+  public int read(@Nonnull byte b[], int off, int len) throws IOException {
     return update(getInputStream().read(b, off, len));
   }
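
The switch re-indented above implements ByteRangeInputStream's lazy-reopen state machine: seek() only records a new position and flips the status to SEEK, the connection is reopened on the next read, and a CLOSED stream fails fast. A self-contained sketch of the same pattern, with a byte array standing in for the HTTP range request (names are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class LazySeekDemo {
      enum StreamStatus { NORMAL, SEEK, CLOSED }

      private final byte[] source = {10, 20, 30, 40};
      private InputStream in;
      private long startPos;
      private StreamStatus status = StreamStatus.SEEK; // first read opens the stream

      void seek(long pos) {
        startPos = pos;
        status = StreamStatus.SEEK; // defer the reopen until the next read
      }

      int read() throws IOException {
        switch (status) {
        case NORMAL:
          break;
        case SEEK:
          if (in != null) {
            in.close();
          }
          // Stand-in for opening an HTTP range request starting at startPos.
          in = new ByteArrayInputStream(source, (int) startPos,
              source.length - (int) startPos);
          status = StreamStatus.NORMAL;
          break;
        case CLOSED:
          throw new IOException("Stream closed");
        }
        return in.read();
      }

      public static void main(String[] args) throws IOException {
        LazySeekDemo s = new LazySeekDemo();
        System.out.println(s.read()); // 10
        s.seek(2);
        System.out.println(s.read()); // 30
      }
    }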
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index d938997..d40adad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -60,7 +60,8 @@ class JsonUtilClient {
 
   /** Convert a Json map to a RemoteException. */
   static RemoteException toRemoteException(final Map<?, ?> json) {
-    final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)json.get(
+        RemoteException.class.getSimpleName());
     final String message = (String)m.get("message");
     final String javaClassName = (String)m.get("javaClassName");
     return new RemoteException(javaClassName, message);
@@ -100,7 +101,8 @@ class JsonUtilClient {
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
+  static HdfsFileStatus toFileStatus(final Map<?, ?> json,
+      boolean includesType) {
     if (json == null) {
       return null;
     }
@@ -108,7 +110,8 @@ class JsonUtilClient {
     final Map<?, ?> m = includesType ?
         (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
     final String localName = (String) m.get("pathSuffix");
-    final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
+    final WebHdfsConstants.PathType type =
+        WebHdfsConstants.PathType.valueOf((String) m.get("type"));
     final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null
         : DFSUtilClient.string2Bytes((String) m.get("symlink"));
 
@@ -116,20 +119,21 @@ class JsonUtilClient {
     final String owner = (String) m.get("owner");
     final String group = (String) m.get("group");
     final FsPermission permission = toFsPermission((String) m.get("permission"),
-                                                   (Boolean) m.get("aclBit"),
-                                                   (Boolean) m.get("encBit"));
+        (Boolean) m.get("aclBit"),
+        (Boolean) m.get("encBit"));
     final long aTime = ((Number) m.get("accessTime")).longValue();
     final long mTime = ((Number) m.get("modificationTime")).longValue();
     final long blockSize = ((Number) m.get("blockSize")).longValue();
     final short replication = ((Number) m.get("replication")).shortValue();
     final long fileId = m.containsKey("fileId") ?
-        ((Number) m.get("fileId")).longValue() : HdfsConstants.GRANDFATHER_INODE_ID;
+        ((Number) m.get("fileId")).longValue() :
+        HdfsConstants.GRANDFATHER_INODE_ID;
     final int childrenNum = getInt(m, "childrenNum", -1);
     final byte storagePolicy = m.containsKey("storagePolicy") ?
         (byte) ((Number) m.get("storagePolicy")).longValue() :
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-    return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
-        blockSize, mTime, aTime, permission, owner, group,
+    return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
+        replication, blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtilClient.string2Bytes(localName),
         fileId, childrenNum, null,
         storagePolicy, null);
@@ -185,7 +189,7 @@ class JsonUtilClient {
 
   /** Convert a Json map to an DatanodeInfo object. */
   static DatanodeInfo toDatanodeInfo(final Map<?, ?> m)
-    throws IOException {
+      throws IOException {
     if (m == null) {
       return null;
     }
@@ -304,7 +308,8 @@ class JsonUtilClient {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(ContentSummary.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)json.get(
+        ContentSummary.class.getSimpleName());
     final long length = ((Number) m.get("length")).longValue();
     final long fileCount = ((Number) m.get("fileCount")).longValue();
     final long directoryCount = ((Number) m.get("directoryCount")).longValue();
@@ -312,9 +317,13 @@ class JsonUtilClient {
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 
-    return new ContentSummary.Builder().length(length).fileCount(fileCount).
-        directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed).
-        spaceQuota(spaceQuota).build();
+    return new ContentSummary.Builder()
+        .length(length)
+        .fileCount(fileCount)
+        .directoryCount(directoryCount)
+        .quota(quota)
+        .spaceConsumed(spaceConsumed)
+        .spaceQuota(spaceQuota).build();
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
@@ -329,21 +338,22 @@ class JsonUtilClient {
     final int length = ((Number) m.get("length")).intValue();
     final byte[] bytes = StringUtils.hexStringToByte((String) m.get("bytes"));
 
-    final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
+    final DataInputStream in = new DataInputStream(
+        new ByteArrayInputStream(bytes));
     final DataChecksum.Type crcType =
         MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
     final MD5MD5CRC32FileChecksum checksum;
 
     // Recreate what DFSClient would have returned.
     switch(crcType) {
-      case CRC32:
-        checksum = new MD5MD5CRC32GzipFileChecksum();
-        break;
-      case CRC32C:
-        checksum = new MD5MD5CRC32CastagnoliFileChecksum();
-        break;
-      default:
-        throw new IOException("Unknown algorithm: " + algorithm);
+    case CRC32:
+      checksum = new MD5MD5CRC32GzipFileChecksum();
+      break;
+    case CRC32C:
+      checksum = new MD5MD5CRC32CastagnoliFileChecksum();
+      break;
+    default:
+      throw new IOException("Unknown algorithm: " + algorithm);
     }
     checksum.readFields(in);
 
@@ -390,14 +400,12 @@ class JsonUtilClient {
     return aclStatusBuilder.build();
   }
 
-  static String getPath(final Map<?, ?> json)
-      throws IOException {
+  static String getPath(final Map<?, ?> json) {
     if (json == null) {
       return null;
     }
 
-    String path = (String) json.get("Path");
-    return path;
+    return (String) json.get("Path");
   }
 
   static byte[] getXAttr(final Map<?, ?> json, final String name)
@@ -446,7 +454,7 @@ class JsonUtilClient {
     ObjectReader reader = new ObjectMapper().reader(List.class);
     final List<Object> xattrs = reader.readValue(namesInJson);
     final List<String> names =
-      Lists.newArrayListWithCapacity(json.keySet().size());
+        Lists.newArrayListWithCapacity(json.keySet().size());
 
     for (Object xattr : xattrs) {
       names.add((String) xattr);
@@ -495,7 +503,8 @@ class JsonUtilClient {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(LocatedBlocks.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)json.get(
+        LocatedBlocks.class.getSimpleName());
     final long fileLength = ((Number) m.get("fileLength")).longValue();
     final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
     final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
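
A note on the `((Number) m.get("length")).longValue()` pattern that recurs throughout JsonUtilClient: when Jackson binds JSON into an untyped Map, a numeric field may arrive as Integer or Long depending on its magnitude, so casting to the common supertype Number and converting explicitly is safe either way. A tiny sketch:

    import java.util.HashMap;
    import java.util.Map;

    public class NumberCastDemo {
      public static void main(String[] args) {
        Map<String, Object> m = new HashMap<>();
        m.put("length", 42);            // small values typically arrive as Integer
        m.put("spaceQuota", 1L << 40);  // large values arrive as Long
        long length = ((Number) m.get("length")).longValue();     // safe either way
        long quota = ((Number) m.get("spaceQuota")).longValue();
        System.out.println(length + " " + quota);
      }
    }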

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index be5f17d..8810ac4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -54,38 +54,40 @@ public class URLConnectionFactory {
   /**
    * Timeout for socket connects and reads
    */
-  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+  public final static int DEFAULT_SOCKET_TIMEOUT = 60 * 1000; // 1 minute
   private final ConnectionConfigurator connConfigurator;
 
-  private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR = new ConnectionConfigurator() {
-    @Override
-    public HttpURLConnection configure(HttpURLConnection conn)
-        throws IOException {
-      URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
-      return conn;
-    }
-  };
+  private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR
+      = new ConnectionConfigurator() {
+        @Override
+        public HttpURLConnection configure(HttpURLConnection conn)
+            throws IOException {
+          URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
+          return conn;
+        }
+      };
 
   /**
    * The URLConnectionFactory that sets the default timeout and it only trusts
    * Java's SSL certificates.
    */
-  public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY = new URLConnectionFactory(
-      DEFAULT_TIMEOUT_CONN_CONFIGURATOR);
+  public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY =
+      new URLConnectionFactory(DEFAULT_TIMEOUT_CONN_CONFIGURATOR);
 
   /**
    * Construct a new URLConnectionFactory based on the configuration. It will
    * try to load SSL certificates when it is specified.
    */
-  public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) {
+  public static URLConnectionFactory newDefaultURLConnectionFactory(
+      Configuration conf) {
     ConnectionConfigurator conn = getSSLConnectionConfiguration(conf);
 
     return new URLConnectionFactory(conn);
   }
 
-  private static ConnectionConfigurator
-      getSSLConnectionConfiguration(Configuration conf) {
-    ConnectionConfigurator conn = null;
+  private static ConnectionConfigurator getSSLConnectionConfiguration(
+      Configuration conf) {
+    ConnectionConfigurator conn;
     try {
       conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
     } catch (Exception e) {
@@ -103,9 +105,9 @@ public class URLConnectionFactory {
    * Construct a new URLConnectionFactory that supports OAut-based connections.
    * It will also try to load the SSL configuration when they are specified.
    */
-  public static URLConnectionFactory
-      newOAuth2URLConnectionFactory(Configuration conf) throws IOException {
-    ConnectionConfigurator conn = null;
+  public static URLConnectionFactory newOAuth2URLConnectionFactory(
+      Configuration conf) throws IOException {
+    ConnectionConfigurator conn;
     try {
       ConnectionConfigurator sslConnConfigurator
           = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
@@ -125,8 +127,9 @@ public class URLConnectionFactory {
   /**
    * Create a new ConnectionConfigurator for SSL connections
    */
-  private static ConnectionConfigurator newSslConnConfigurator(final int timeout,
-      Configuration conf) throws IOException, GeneralSecurityException {
+  private static ConnectionConfigurator newSslConnConfigurator(
+      final int timeout, Configuration conf)
+      throws IOException, GeneralSecurityException {
     final SSLFactory factory;
     final SSLSocketFactory sf;
     final HostnameVerifier hv;
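
For context on the DEFAULT_TIMEOUT_CONN_CONFIGURATOR reflowed above: a sketch of what a timeout-setting configurator does, assuming (this is an assumption, not quoted from the patch) that URLConnectionFactory.setTimeouts wraps the two standard HttpURLConnection calls:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TimeoutDemo {
      static final int DEFAULT_SOCKET_TIMEOUT = 60 * 1000; // 1 minute

      // Assumed equivalent of URLConnectionFactory.setTimeouts(conn, timeout).
      static HttpURLConnection configure(HttpURLConnection conn) {
        conn.setConnectTimeout(DEFAULT_SOCKET_TIMEOUT);
        conn.setReadTimeout(DEFAULT_SOCKET_TIMEOUT);
        return conn;
      }

      public static void main(String[] args) throws Exception {
        HttpURLConnection conn =
            (HttpURLConnection) new URL("http://example.com/").openConnection();
        configure(conn);
        System.out.println(conn.getConnectTimeout() + " " + conn.getReadTimeout());
      }
    }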

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e122748..16d41a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -89,15 +89,20 @@ import com.google.common.collect.Lists;
 
 /** A FileSystem for HDFS over the web. */
 public class WebHdfsFileSystem extends FileSystem
-    implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
+    implements DelegationTokenRenewer.Renewable,
+    TokenAspect.TokenManagementDelegator {
   public static final Logger LOG = LoggerFactory
       .getLogger(WebHdfsFileSystem.class);
   /** WebHdfs version. */
   public static final int VERSION = 1;
   /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
-  public static final String PATH_PREFIX = "/" + WebHdfsConstants.WEBHDFS_SCHEME + "/v" + VERSION;
+  public static final String PATH_PREFIX = "/" + WebHdfsConstants.WEBHDFS_SCHEME
+      + "/v" + VERSION;
 
-  /** Default connection factory may be overridden in tests to use smaller timeout values */
+  /**
+   * Default connection factory may be overridden in tests to use smaller
+   * timeout values
+   */
   protected URLConnectionFactory connectionFactory;
 
   @VisibleForTesting
@@ -141,7 +146,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public synchronized void initialize(URI uri, Configuration conf
-      ) throws IOException {
+  ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
     /** set user pattern based on configuration file */
@@ -207,7 +212,7 @@ public class WebHdfsFileSystem extends FileSystem
               failoverSleepMaxMillis);
     }
 
-    this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi)));
+    this.workingDir = makeQualified(getHomeDirectory());
     this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
     this.disallowFallbackToInsecureCluster = !conf.getBoolean(
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
@@ -317,7 +322,7 @@ public class WebHdfsFileSystem extends FileSystem
     String result = absolutePath.toUri().getPath();
     if (!DFSUtilClient.isValidName(result)) {
       throw new IllegalArgumentException("Invalid DFS directory name " +
-                                         result);
+          result);
     }
     workingDir = absolutePath;
   }
@@ -326,14 +331,16 @@ public class WebHdfsFileSystem extends FileSystem
     return f.isAbsolute()? f: new Path(workingDir, f);
   }
 
-  static Map<?, ?> jsonParse(final HttpURLConnection c, final boolean useErrorStream
-      ) throws IOException {
+  static Map<?, ?> jsonParse(final HttpURLConnection c,
+      final boolean useErrorStream) throws IOException {
     if (c.getContentLength() == 0) {
       return null;
     }
-    final InputStream in = useErrorStream? c.getErrorStream(): c.getInputStream();
+    final InputStream in = useErrorStream ?
+        c.getErrorStream() : c.getInputStream();
     if (in == null) {
-      throw new IOException("The " + (useErrorStream? "error": "input") + " stream is null.");
+      throw new IOException("The " + (useErrorStream? "error": "input") +
+          " stream is null.");
     }
     try {
       final String contentType = c.getContentType();
@@ -353,7 +360,8 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
-      final HttpURLConnection conn, boolean unwrapException) throws IOException {
+      final HttpURLConnection conn, boolean unwrapException)
+      throws IOException {
     final int code = conn.getResponseCode();
     // server is demanding an authentication we don't support
     if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {
@@ -435,7 +443,7 @@ public class WebHdfsFileSystem extends FileSystem
   private URL getNamenodeURL(String path, String query) throws IOException {
     InetSocketAddress nnAddr = getCurrentNNAddr();
     final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
-          nnAddr.getPort(), path + '?' + query);
+        nnAddr.getPort(), path + '?' + query);
     LOG.trace("url={}", url);
     return url;
   }
@@ -484,7 +492,8 @@ public class WebHdfsFileSystem extends FileSystem
 
     protected final HttpOpParam.Op op;
     private final boolean redirected;
-    protected ExcludeDatanodesParam excludeDatanodes = new ExcludeDatanodesParam("");
+    protected ExcludeDatanodesParam excludeDatanodes =
+        new ExcludeDatanodesParam("");
 
     private boolean checkRetry;
 
@@ -521,7 +530,8 @@ public class WebHdfsFileSystem extends FileSystem
      *
      * Create/Append:
      * Step 1) Submit a Http request with neither auto-redirect nor data.
-     * Step 2) Submit another Http request with the URL from the Location header with data.
+     * Step 2) Submit another Http request with the URL from the Location header
+     * with data.
      *
      * The reason of having two-step create/append is for preventing clients to
      * send out the data before the redirect. This issue is addressed by the
@@ -580,26 +590,25 @@ public class WebHdfsFileSystem extends FileSystem
       conn.setRequestMethod(op.getType().toString());
       conn.setInstanceFollowRedirects(false);
       switch (op.getType()) {
-        // if not sending a message body for a POST or PUT operation, need
-        // to ensure the server/proxy knows this
-        case POST:
-        case PUT: {
-          conn.setDoOutput(true);
-          if (!doOutput) {
-            // explicitly setting content-length to 0 won't do spnego!!
-            // opening and closing the stream will send "Content-Length: 0"
-            conn.getOutputStream().close();
-          } else {
-            conn.setRequestProperty("Content-Type",
-                MediaType.APPLICATION_OCTET_STREAM);
-            conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
-          }
-          break;
-        }
-        default: {
-          conn.setDoOutput(doOutput);
-          break;
+      // if not sending a message body for a POST or PUT operation, need
+      // to ensure the server/proxy knows this
+      case POST:
+      case PUT: {
+        conn.setDoOutput(true);
+        if (!doOutput) {
+          // explicitly setting content-length to 0 won't do spnego!!
+          // opening and closing the stream will send "Content-Length: 0"
+          conn.getOutputStream().close();
+        } else {
+          conn.setRequestProperty("Content-Type",
+              MediaType.APPLICATION_OCTET_STREAM);
+          conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
         }
+        break;
+      }
+      default:
+        conn.setDoOutput(doOutput);
+        break;
       }
       conn.connect();
       return conn;
@@ -649,21 +658,22 @@ public class WebHdfsFileSystem extends FileSystem
     }
 
     private void shouldRetry(final IOException ioe, final int retry
-        ) throws IOException {
+    ) throws IOException {
       InetSocketAddress nnAddr = getCurrentNNAddr();
       if (checkRetry) {
         try {
           final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
               ioe, retry, 0, true);
 
-          boolean isRetry = a.action == RetryPolicy.RetryAction.RetryDecision.RETRY;
+          boolean isRetry =
+              a.action == RetryPolicy.RetryAction.RetryDecision.RETRY;
           boolean isFailoverAndRetry =
               a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
 
           if (isRetry || isFailoverAndRetry) {
             LOG.info("Retrying connect to namenode: {}. Already tried {}"
-                + " time(s); retry policy is {}, delay {}ms.", nnAddr, retry,
-                retryPolicy, a.delayMillis);
+                    + " time(s); retry policy is {}, delay {}ms.",
+                nnAddr, retry, retryPolicy, a.delayMillis);
 
             if (isFailoverAndRetry) {
               resetStateToFailOver();
@@ -786,7 +796,8 @@ public class WebHdfsFileSystem extends FileSystem
   /**
    * Handle create/append output streams
    */
-  class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> {
+  class FsPathOutputStreamRunner
+      extends AbstractFsPathRunner<FSDataOutputStream> {
     private final int bufferSize;
 
     FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize,
@@ -837,7 +848,8 @@ public class WebHdfsFileSystem extends FileSystem
       return url;
     }
 
-    protected URLRunner(final HttpOpParam.Op op, final URL url, boolean redirected) {
+    protected URLRunner(final HttpOpParam.Op op, final URL url,
+        boolean redirected) {
       super(op, redirected);
       this.url = url;
     }
@@ -911,7 +923,7 @@ public class WebHdfsFileSystem extends FileSystem
    * Create a symlink pointing to the destination path.
    */
   public void createSymlink(Path destination, Path f, boolean createParent
-      ) throws IOException {
+  ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.CREATESYMLINK;
     new FsPathRunner(op, f,
@@ -1020,7 +1032,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public void setOwner(final Path p, final String owner, final String group
-      ) throws IOException {
+  ) throws IOException {
     if (owner == null && group == null) {
       throw new IOException("owner == null && group == null");
     }
@@ -1034,7 +1046,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public void setPermission(final Path p, final FsPermission permission
-      ) throws IOException {
+  ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
     new FsPathRunner(op, p,new PermissionParam(permission)).run();
@@ -1083,14 +1095,13 @@ public class WebHdfsFileSystem extends FileSystem
       throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
-    Path spath = new FsPathResponseRunner<Path>(op, path,
+    return new FsPathResponseRunner<Path>(op, path,
         new SnapshotNameParam(snapshotName)) {
       @Override
       Path decodeResponse(Map<?,?> json) {
         return new Path((String) json.get(Path.class.getSimpleName()));
       }
     }.run();
-    return spath;
   }
 
   @Override
@@ -1112,7 +1123,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public boolean setReplication(final Path p, final short replication
-     ) throws IOException {
+  ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
     return new FsPathBooleanRunner(op, p,
@@ -1122,7 +1133,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public void setTimes(final Path p, final long mtime, final long atime
-      ) throws IOException {
+  ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETTIMES;
     new FsPathRunner(op, p,
@@ -1213,7 +1224,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public FSDataInputStream open(final Path f, final int buffersize
-      ) throws IOException {
+  ) throws IOException {
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.OPEN;
     // use a runner so the open can recover from an invalid token
@@ -1311,7 +1322,7 @@ public class WebHdfsFileSystem extends FileSystem
     /** Remove offset parameter before returning the resolved url. */
     @Override
     protected URL getResolvedUrl(final HttpURLConnection connection
-        ) throws MalformedURLException {
+    ) throws MalformedURLException {
       return removeOffsetParam(connection.getURL());
     }
   }
@@ -1324,16 +1335,19 @@ public class WebHdfsFileSystem extends FileSystem
     return new FsPathResponseRunner<FileStatus[]>(op, f) {
       @Override
       FileStatus[] decodeResponse(Map<?,?> json) {
-        final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
+        final Map<?, ?> rootmap =
+            (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
         final List<?> array = JsonUtilClient.getList(rootmap,
-                                                     FileStatus.class.getSimpleName());
+            FileStatus.class.getSimpleName());
 
         //convert FileStatus
+        assert array != null;
         final FileStatus[] statuses = new FileStatus[array.size()];
         int i = 0;
         for (Object object : array) {
           final Map<?, ?> m = (Map<?, ?>) object;
-          statuses[i++] = makeQualified(JsonUtilClient.toFileStatus(m, false), f);
+          statuses[i++] = makeQualified(JsonUtilClient.toFileStatus(m, false),
+              f);
         }
         return statuses;
       }
@@ -1347,12 +1361,12 @@ public class WebHdfsFileSystem extends FileSystem
     Token<DelegationTokenIdentifier> token =
         new FsPathResponseRunner<Token<DelegationTokenIdentifier>>(
             op, null, new RenewerParam(renewer)) {
-      @Override
-      Token<DelegationTokenIdentifier> decodeResponse(Map<?,?> json)
-          throws IOException {
-        return JsonUtilClient.toDelegationToken(json);
-      }
-    }.run();
+          @Override
+          Token<DelegationTokenIdentifier> decodeResponse(Map<?,?> json)
+              throws IOException {
+            return JsonUtilClient.toDelegationToken(json);
+          }
+        }.run();
     if (token != null) {
       token.setService(tokenServiceName);
     } else {
@@ -1378,7 +1392,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public synchronized long renewDelegationToken(final Token<?> token
-      ) throws IOException {
+  ) throws IOException {
     final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
     return new FsPathResponseRunner<Long>(op, null,
         new TokenArgumentParam(token.encodeToUrlString())) {
@@ -1391,7 +1405,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public synchronized void cancelDelegationToken(final Token<?> token
-      ) throws IOException {
+  ) throws IOException {
     final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
     new FsPathRunner(op, null,
         new TokenArgumentParam(token.encodeToUrlString())
@@ -1444,7 +1458,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
-      ) throws IOException {
+  ) throws IOException {
     statistics.incrementReadOps(1);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
@@ -1462,11 +1476,11 @@ public class WebHdfsFileSystem extends FileSystem
    * an HA cluster with its logical name, the resolver further resolves the
    * logical name(i.e., the authority in the URL) into real namenode addresses.
    */
-  private InetSocketAddress[] resolveNNAddr() throws IOException {
+  private InetSocketAddress[] resolveNNAddr() {
     Configuration conf = getConf();
     final String scheme = uri.getScheme();
 
-    ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
+    ArrayList<InetSocketAddress> ret = new ArrayList<>();
 
     if (!HAUtilClient.isLogicalUri(conf, uri)) {
       InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
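
The two-step create/append flow documented in the javadoc hunk above (submit with neither auto-redirect nor data, then resend with data to the Location URL) can be sketched outside of WebHdfsFileSystem's runner machinery. This is a hedged illustration, not the actual runner; running it requires a live WebHDFS endpoint:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TwoStepWriteDemo {
      static void write(URL nnUrl, byte[] data) throws Exception {
        // Step 1: no body, no auto-redirect; the NameNode answers with a
        // 307 Temporary Redirect whose Location header names a DataNode.
        HttpURLConnection step1 = (HttpURLConnection) nnUrl.openConnection();
        step1.setRequestMethod("PUT");
        step1.setInstanceFollowRedirects(false);
        String location = step1.getHeaderField("Location");
        step1.disconnect();

        // Step 2: send the actual bytes to the URL we were redirected to,
        // so no data is transmitted before the redirect is known.
        HttpURLConnection step2 =
            (HttpURLConnection) new URL(location).openConnection();
        step2.setRequestMethod("PUT");
        step2.setDoOutput(true);
        try (OutputStream out = step2.getOutputStream()) {
          out.write(data);
        }
        System.out.println("step 2 response: " + step2.getResponseCode());
      }

      public static void main(String[] args) throws Exception {
        // Needs a live WebHDFS endpoint, e.g.:
        // write(new URL("http://nn:50070/webhdfs/v1/tmp/f?op=CREATE"), new byte[0]);
      }
    }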

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
index 3189f94..97414aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
@@ -39,7 +39,7 @@ public abstract class AccessTokenProvider implements Configurable {
    * Will be called for each connection, so implementations should be
    * performant. Implementations are responsible for any refreshing of
    * the token.
-   * 
+   *
    * @return Access token to be added to connection header.
    */
   public abstract String getAccessToken() throws IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
index aa05dd6..557d0fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
@@ -43,7 +43,7 @@ public class AccessTokenTimer {
   }
 
   /**
-   * 
+   *
    * @param timer Timer instance for unit testing
    */
   public AccessTokenTimer(Timer timer) {
@@ -51,7 +51,7 @@ public class AccessTokenTimer {
     this.nextRefreshMSSinceEpoch = 0;
   }
 
-  /** 
+  /**
    * Set when the access token will expire as reported by the oauth server,
    * ie in seconds from now.
    * @param expiresIn Access time expiration as reported by OAuth server
@@ -63,7 +63,7 @@ public class AccessTokenTimer {
   /**
    * Set when the access token will expire in milliseconds from epoch,
    * as required by the WebHDFS configuration.  This is a bit hacky and lame.
-   * 
+   *
    * @param expiresInMSSinceEpoch Access time expiration in ms since epoch.
    */
   public void setExpiresInMSSinceEpoch(String expiresInMSSinceEpoch){
@@ -72,13 +72,13 @@ public class AccessTokenTimer {
 
   /**
    * Get next time we should refresh the token.
-   * 
+   *
    * @return Next time since epoch we'll need to refresh the token.
    */
   public long getNextRefreshMSSinceEpoch() {
     return nextRefreshMSSinceEpoch;
   }
-  
+
   /**
    * Return true if the current token has expired or will expire within the
    * EXPIRE_BUFFER_MS (to give ample wiggle room for the call to be made to
@@ -89,7 +89,7 @@ public class AccessTokenTimer {
     long currTime = timer.now();
     return currTime > lowerLimit;
   }
-  
+
   /**
    * The expires_in param from OAuth is in seconds-from-now.  Convert to
    * milliseconds-from-epoch
@@ -99,5 +99,5 @@ public class AccessTokenTimer {
     long expiresMs = expiresSecs * 1000;
     return timer.now() + expiresMs;
   }
-  
+
 }
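
The conversion described in the last hunk above is simple but worth pinning down: OAuth's expires_in is seconds-from-now, while the WebHDFS configuration tracks milliseconds-since-epoch. A minimal sketch of the arithmetic AccessTokenTimer performs:

    public class ExpiryMathDemo {
      static long convertExpiresIn(long nowMs, String expiresInSecs) {
        long expiresMs = Long.parseLong(expiresInSecs) * 1000;
        return nowMs + expiresMs;
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // A token valid for 3600 s should next refresh about an hour from now.
        System.out.println(convertExpiresIn(now, "3600") - now); // 3600000
      }
    }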

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
index b56dbde..bd03588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
@@ -27,7 +27,7 @@ import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
 
 /**
  * Obtain an access token via a a credential (provided through the
- * Configuration) using the 
+ * Configuration) using the
  * <a href="https://tools.ietf.org/html/rfc6749#section-4.4">
  *   Client Credentials Grant workflow</a>.
  */
@@ -56,7 +56,7 @@ public class ConfCredentialBasedAccessTokenProvider
       throw new IllegalArgumentException("Credential has not been " +
           "provided in configuration");
     }
-    
+
     return credential;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
index 1e80451..773eeae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
@@ -54,42 +54,42 @@ import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
 @InterfaceStability.Evolving
 public class ConfRefreshTokenBasedAccessTokenProvider
     extends AccessTokenProvider {
-  
+
   public static final String OAUTH_REFRESH_TOKEN_KEY
       = "dfs.webhdfs.oauth2.refresh.token";
   public static final String OAUTH_REFRESH_TOKEN_EXPIRES_KEY
       = "dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch";
 
   private AccessTokenTimer accessTokenTimer;
-  
+
   private String accessToken;
-  
+
   private String refreshToken;
-  
+
   private String clientId;
-  
+
   private String refreshURL;
 
-  
+
   public ConfRefreshTokenBasedAccessTokenProvider() {
     this.accessTokenTimer = new AccessTokenTimer();
   }
-  
+
   public ConfRefreshTokenBasedAccessTokenProvider(Timer timer) {
     this.accessTokenTimer = new AccessTokenTimer(timer);
   }
-  
+
   @Override
   public void setConf(Configuration conf) {
     super.setConf(conf);
     refreshToken = notNull(conf, (OAUTH_REFRESH_TOKEN_KEY));
-    
+
     accessTokenTimer.setExpiresInMSSinceEpoch(
         notNull(conf, OAUTH_REFRESH_TOKEN_EXPIRES_KEY));
 
     clientId = notNull(conf, OAUTH_CLIENT_ID_KEY);
     refreshURL = notNull(conf, OAUTH_REFRESH_URL_KEY);
-    
+
   }
 
   @Override
@@ -97,10 +97,10 @@ public class ConfRefreshTokenBasedAccessTokenProvider
     if(accessTokenTimer.shouldRefresh()) {
       refresh();
     }
-    
+
     return accessToken;
   }
-  
+
   void refresh() throws IOException {
     try {
       OkHttpClient client = new OkHttpClient();
@@ -139,7 +139,7 @@ public class ConfRefreshTokenBasedAccessTokenProvider
       throw new IOException("Exception while refreshing access token", e);
     }
   }
-  
+
   public String getRefreshToken() {
     return refreshToken;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
index 625d935..15cda88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
@@ -56,25 +56,25 @@ public abstract class CredentialBasedAccessTokenProvider
     extends AccessTokenProvider {
   public static final String OAUTH_CREDENTIAL_KEY
       = "dfs.webhdfs.oauth2.credential";
-  
+
   private AccessTokenTimer timer;
-  
+
   private String clientId;
-  
+
   private String refreshURL;
-  
+
   private String accessToken;
-  
+
   private boolean initialCredentialObtained = false;
 
   CredentialBasedAccessTokenProvider() {
     this.timer = new AccessTokenTimer();
   }
-  
+
   CredentialBasedAccessTokenProvider(Timer timer) {
     this.timer = new AccessTokenTimer(timer);
   }
-  
+
   public abstract String getCredential();
 
   @Override
@@ -90,10 +90,10 @@ public abstract class CredentialBasedAccessTokenProvider
       refresh();
       initialCredentialObtained = true;
     }
-    
+
     return accessToken;
   }
-  
+
   void refresh() throws IOException {
     try {
       OkHttpClient client = new OkHttpClient();
@@ -122,7 +122,7 @@ public abstract class CredentialBasedAccessTokenProvider
       ObjectMapper mapper = new ObjectMapper();
       Map<?, ?> response = mapper.reader(Map.class)
           .readValue(responseBody.body().string());
-      
+
       String newExpiresIn = response.get(EXPIRES_IN).toString();
       timer.setExpiresIn(newExpiresIn);
 

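CredentialBasedAccessTokenProvider leaves only the credential lookup abstract; ConfCredentialBasedAccessTokenProvider, earlier in this patch, supplies it from configuration, and refresh() exchanges the credential for an access token via the client-credentials grant. A hypothetical alternative subclass is sketched below; it must live in the same package, since the constructors shown above are package-private:

    package org.apache.hadoop.hdfs.web.oauth2;

    /** Hypothetical provider reading the OAuth2 credential from the environment. */
    public class EnvCredentialBasedAccessTokenProvider
        extends CredentialBasedAccessTokenProvider {
      @Override
      public String getCredential() {
        // Consumed by refresh() when it builds the token request.
        return System.getenv("WEBHDFS_OAUTH2_CREDENTIAL");
      }
    }
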
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
index f334b24..07ab23a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
@@ -36,44 +36,44 @@ import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class OAuth2ConnectionConfigurator implements ConnectionConfigurator {
-  
+
   public static final String HEADER = "Bearer ";
-  
+
   private final AccessTokenProvider accessTokenProvider;
- 
+
   private ConnectionConfigurator sslConfigurator = null;
-  
+
   public OAuth2ConnectionConfigurator(Configuration conf) {
     this(conf, null);
   }
-  
+
   @SuppressWarnings("unchecked")
   public OAuth2ConnectionConfigurator(Configuration conf,
                                       ConnectionConfigurator sslConfigurator) {
     this.sslConfigurator = sslConfigurator;
-    
+
     notNull(conf, ACCESS_TOKEN_PROVIDER_KEY);
-    
+
     Class accessTokenProviderClass = conf.getClass(ACCESS_TOKEN_PROVIDER_KEY,
         ConfCredentialBasedAccessTokenProvider.class,
         AccessTokenProvider.class);
-    
+
     accessTokenProvider = (AccessTokenProvider) ReflectionUtils
         .newInstance(accessTokenProviderClass, conf);
     accessTokenProvider.setConf(conf);
   }
-  
+
   @Override
   public HttpURLConnection configure(HttpURLConnection conn)
       throws IOException {
     if(sslConfigurator != null) {
       sslConfigurator.configure(conn);
     }
-    
+
     String accessToken = accessTokenProvider.getAccessToken();
-    
+
     conn.setRequestProperty("AUTHORIZATION", HEADER + accessToken);
-    
+
     return conn;
   }
 }

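configure() is the whole integration point: run the optional SSL configurator first, then stamp a bearer token on the connection. A sketch of driving it directly follows; the key string behind ACCESS_TOKEN_PROVIDER_KEY is not shown in this hunk, so "dfs.webhdfs.oauth2.access.token.provider" is assumed. Note that the notNull() call above requires the key to be set even though the class lookup has a default:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.web.oauth2.AccessTokenProvider;
    import org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider;
    import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;

    public class OAuth2ConfiguratorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setClass("dfs.webhdfs.oauth2.access.token.provider",  // assumed key
            ConfRefreshTokenBasedAccessTokenProvider.class, AccessTokenProvider.class);
        // ... plus the chosen provider's own keys, as in the earlier sketch ...

        OAuth2ConnectionConfigurator configurator =
            new OAuth2ConnectionConfigurator(conf);
        HttpURLConnection conn = (HttpURLConnection)
            new URL("http://namenode:50070/webhdfs/v1/?op=GETFILESTATUS")
                .openConnection();
        conn = configurator.configure(conn);  // adds "AUTHORIZATION: Bearer <token>"
      }
    }
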
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
index 190a1f5..3f091c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
@@ -32,8 +32,8 @@ public final class OAuth2Constants {
 
   public static final MediaType URLENCODED
       = MediaType.parse("application/x-www-form-urlencoded; charset=utf-8");
-  
-  /* Constants for OAuth protocol */ 
+
+  /* Constants for OAuth protocol */
   public static final String ACCESS_TOKEN = "access_token";
   public static final String BEARER = "bearer";
   public static final String CLIENT_CREDENTIALS = "client_credentials";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
index 939798d..f333b5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
@@ -40,7 +40,7 @@ final class Utils {
 
     return value;
   }
-  
+
   public static String postBody(String ... kv)
       throws UnsupportedEncodingException {
     if(kv.length % 2 != 0) {
@@ -48,7 +48,7 @@ final class Utils {
     }
     StringBuilder sb = new StringBuilder();
     int i = 0;
-    
+
     while(i < kv.length) {
       if(i > 0) {
         sb.append("&");
@@ -57,7 +57,7 @@ final class Utils {
       sb.append("=");
       sb.append(URLEncoder.encode(kv[i++], "UTF-8"));
     }
-    
+
     return sb.toString();
   }
 }

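postBody() assembles the application/x-www-form-urlencoded bodies POSTed to the token endpoint: arguments are alternating key/value pairs, each URL-encoded, pairs joined with "&", and an odd argument count is rejected (the throw is elided between the hunks above). For example:

    // Inside org.apache.hadoop.hdfs.web.oauth2 (Utils is package-private):
    String body = Utils.postBody(
        "grant_type", "refresh_token",
        "refresh_token", "abc/123");
    // body is "grant_type=refresh_token&refresh_token=abc%2F123"
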
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
index aeb581f..23384b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
@@ -23,4 +23,4 @@
 @InterfaceAudience.Public
 package org.apache.hadoop.hdfs.web.oauth2;
 
-import org.apache.hadoop.classification.InterfaceAudience;
\ No newline at end of file
+import org.apache.hadoop.classification.InterfaceAudience;

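The "\ No newline at end of file" marker here, and in many hunks below, is git's note that the last line of the old file lacked a trailing newline; each such hunk simply re-emits the identical closing line with the newline added:

    -import org.apache.hadoop.classification.InterfaceAudience;
    \ No newline at end of file
    +import org.apache.hadoop.classification.InterfaceAudience;
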
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
index 9bc938d..48c1317 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
@@ -46,4 +46,4 @@ public class AccessTimeParam extends LongParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
index 4c998b6..48f202c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
@@ -66,4 +66,4 @@ public class AclPermissionParam extends StringParam {
   private static String parseAclSpec(List<AclEntry> aclEntry) {
     return StringUtils.join(aclEntry, ",");
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
index 3f53d7c..1c4185c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
@@ -57,4 +57,4 @@ public class BlockSizeParam extends LongParam {
     return getValue() != null? getValue()
         : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
index 376d7d8..2b0028b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
@@ -57,4 +57,4 @@ public class BufferSizeParam extends IntegerParam {
             CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
             CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
index b68c5f5..7eba417 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
@@ -59,7 +59,6 @@ public class ConcatSourcesParam extends StringParam {
 
   /** @return the absolute paths. */
   public final String[] getAbsolutePaths() {
-    final String[] paths = getValue().split(",");
-    return paths;
+    return getValue().split(",");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateFlagParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateFlagParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateFlagParam.java
index 91dbf11..33f561a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateFlagParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateFlagParam.java
@@ -30,7 +30,7 @@ public class CreateFlagParam extends EnumSetParam<CreateFlag> {
 
   public static final String DEFAULT = "";
 
-  private static final Domain<CreateFlag> DOMAIN = new Domain<CreateFlag>(
+  private static final Domain<CreateFlag> DOMAIN = new Domain<>(
       NAME, CreateFlag.class);
 
   public CreateFlagParam(final EnumSet<CreateFlag> createFlags) {

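This is the first of several hunks that switch to the Java 7 diamond operator: when the declared type already names the type arguments, repeating them at the constructor call is redundant and the compiler infers them. A generic illustration:

    import java.util.ArrayList;
    import java.util.List;

    class DiamondSketch {
      // Type argument inferred from the declared type (Java 7+).
      private static final List<String> NAMES = new ArrayList<>();
    }
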
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
index c701bb8..eaa5e8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
@@ -46,4 +46,4 @@ public class CreateParentParam extends BooleanParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
index 57be43e..5329580 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
@@ -41,4 +41,4 @@ public class DelegationParam extends StringParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
index 65275e0..25bed1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
@@ -22,7 +22,7 @@ import java.net.HttpURLConnection;
 /** Http DELETE operation parameter. */
 public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
   /** Delete operations. */
-  public static enum Op implements HttpOpParam.Op {
+  public enum Op implements HttpOpParam.Op {
     DELETE(HttpURLConnection.HTTP_OK),
     DELETESNAPSHOT(HttpURLConnection.HTTP_OK),
 
@@ -65,7 +65,7 @@ public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
     }
   }
 
-  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+  private static final Domain<Op> DOMAIN = new Domain<>(NAME, Op.class);
 
   /**
    * Constructor.
@@ -79,4 +79,4 @@ public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
index 13d188c..6e8d985 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
@@ -38,4 +38,4 @@ public class DoAsParam extends StringParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
index 60d201b..9f9dd4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.web.resources;
 import java.util.Arrays;
 import org.apache.hadoop.util.StringUtils;
 
-abstract class EnumParam<E extends Enum<E>> extends Param<E, EnumParam.Domain<E>> {
+abstract class EnumParam<E extends Enum<E>>
+    extends Param<E, EnumParam.Domain<E>> {
   EnumParam(final Domain<E> domain, final E value) {
     super(domain, value);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
index 030abe7..33fd723 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
@@ -22,7 +22,8 @@ import java.util.EnumSet;
 import java.util.Iterator;
 import org.apache.hadoop.util.StringUtils;
 
-abstract class EnumSetParam<E extends Enum<E>> extends Param<EnumSet<E>, EnumSetParam.Domain<E>> {
+abstract class EnumSetParam<E extends Enum<E>>
+    extends Param<EnumSet<E>, EnumSetParam.Domain<E>> {
   /** Convert an EnumSet to a string of comma separated values. */
   static <E extends Enum<E>> String toString(EnumSet<E> set) {
     if (set == null || set.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
index 3f44fae..9c73b92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
@@ -39,4 +39,4 @@ public class ExcludeDatanodesParam extends StringParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index c39032c..119f15d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -22,7 +22,7 @@ import java.net.HttpURLConnection;
 /** Http GET operation parameter. */
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
   /** Get operations. */
-  public static enum Op implements HttpOpParam.Op {
+  public enum Op implements HttpOpParam.Op {
     OPEN(true, HttpURLConnection.HTTP_OK),
 
     GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
@@ -52,7 +52,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     }
 
     Op(final boolean redirect, final int expectedHttpResponseCode,
-       final boolean requireAuth) {
+        final boolean requireAuth) {
       this.redirect = redirect;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
       this.requireAuth = requireAuth;
@@ -89,7 +89,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     }
   }
 
-  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+  private static final Domain<Op> DOMAIN = new Domain<>(NAME, Op.class);
 
   /**
    * Constructor.
@@ -103,4 +103,4 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
index c0429cc..fbcea0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
@@ -38,4 +38,4 @@ public class GroupParam extends StringParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
index 67224ef..cec4821 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
@@ -34,29 +34,29 @@ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
   public static final String DEFAULT = NULL;
 
   /** Http operation types */
-  public static enum Type {
-    GET, PUT, POST, DELETE;
+  public enum Type {
+    GET, PUT, POST, DELETE
   }
 
   /** Http operation interface. */
-  public static interface Op {
+  public interface Op {
     /** @return the Http operation type. */
-    public Type getType();
+    Type getType();
 
     /** @return true if the operation cannot use a token */
-    public boolean getRequireAuth();
+    boolean getRequireAuth();
 
     /** @return true if the operation will do output. */
-    public boolean getDoOutput();
+    boolean getDoOutput();
 
     /** @return true if the operation will be redirected. */
-    public boolean getRedirect();
+    boolean getRedirect();
 
     /** @return the expected http response code. */
-    public int getExpectedHttpResponseCode();
+    int getExpectedHttpResponseCode();
 
     /** @return a URI query string. */
-    public String toQueryString();
+    String toQueryString();
   }
 
   /** Expects HTTP response 307 "Temporary Redirect". */
@@ -131,4 +131,4 @@ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
   HttpOpParam(final Domain<E> domain, final E value) {
     super(domain, value);
   }
-}
\ No newline at end of file
+}

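The modifier edits in HttpOpParam, like those in DeleteOpParam and GetOpParam above, drop modifiers the language already implies: a nested enum is implicitly static, every interface member is implicitly public (methods implicitly abstract as well), and the semicolon after the last enum constant is optional when the enum body declares nothing else. The two classes below declare semantically identical members:

    class Before {
      public static enum Type { GET, PUT; }
      public static interface Op { public Type getType(); }
    }

    class After {
      public enum Type { GET, PUT }
      public interface Op { Type getType(); }
    }
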
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
index 5a609ee..5716b49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
@@ -51,4 +51,4 @@ public class LengthParam extends LongParam {
     Long v = getValue();
     return v == null ? -1 : v;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
index 12e0a94..45f0aa2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
@@ -19,7 +19,8 @@ package org.apache.hadoop.hdfs.web.resources;
 
 /** Long parameter. */
 abstract class LongParam extends Param<Long, LongParam.Domain> {
-  LongParam(final Domain domain, final Long value, final Long min, final Long max) {
+  LongParam(final Domain domain, final Long value, final Long min,
+            final Long max) {
     super(domain, value);
     checkRange(min, max);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
index 59911d7..fc3017f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
@@ -46,4 +46,4 @@ public class ModificationTimeParam extends LongParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
index 83aba9e..526e2fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
@@ -46,4 +46,4 @@ public class NewLengthParam extends LongParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
index 6d88703..eb00d75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
@@ -51,4 +51,4 @@ public class OffsetParam extends LongParam {
     Long offset = getValue();
     return (offset == null) ? Long.valueOf(0) : offset;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
index f6945bb..9610b93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
@@ -46,4 +46,4 @@ public class OverwriteParam extends BooleanParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
index a1c10aa..b83a2ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
@@ -38,4 +38,4 @@ public class OwnerParam extends StringParam {
   public String getName() {
     return NAME;
   }
-}
\ No newline at end of file
+}

