hadoop-common-commits mailing list archives

From: whe...@apache.org
Subject: [05/10] hadoop git commit: HDFS-8979. Clean up checkstyle warnings in hadoop-hdfs-client module. Contributed by Mingliang Liu.
Date: Sat, 03 Oct 2015 18:38:56 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
index 18c940b..7c1143b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -29,7 +28,7 @@ public class DatanodeInfoWithStorage extends DatanodeInfo {
   private final StorageType storageType;
 
   public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
-                                 StorageType storageType) {
+      StorageType storageType) {
     super(from);
     this.storageID = storageID;
     this.storageType = storageType;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
index b7b2289..8bff150 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
@@ -55,10 +55,8 @@ public class DatanodeLocalInfo {
 
   /** A formatted string for printing the status of the DataNode. */
   public String getDatanodeLocalReport() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append("Uptime: " + getUptime());
-    buffer.append(", Software version: " + getSoftwareVersion());
-    buffer.append(", Config version: " + getConfigVersion());
-    return buffer.toString();
+    return ("Uptime: " + getUptime())
+        + ", Software version: " + getSoftwareVersion()
+        + ", Config version: " + getConfigVersion();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
index a3cff82..eb6a0c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
@@ -39,7 +39,7 @@ public class EncryptionZoneIterator
   private final Tracer tracer;
 
   public EncryptionZoneIterator(ClientProtocol namenode, Tracer tracer) {
-    super(Long.valueOf(0));
+    super((long) 0);
     this.namenode = namenode;
     this.tracer = tracer;
   }
@@ -47,11 +47,8 @@ public class EncryptionZoneIterator
   @Override
   public BatchedEntries<EncryptionZone> makeRequest(Long prevId)
       throws IOException {
-    TraceScope scope = tracer.newScope("listEncryptionZones");
-    try {
+    try (TraceScope ignored = tracer.newScope("listEncryptionZones")) {
       return namenode.listEncryptionZones(prevId);
-    } finally {
-      scope.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index e5dfdff..c976538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.io.erasurecode.ECSchema;
 
-import java.util.Map;
-
 /**
  * A policy about how to write/read/code an erasure coding file.
  */
@@ -66,11 +64,9 @@ public final class ErasureCodingPolicy {
     }
     ErasureCodingPolicy that = (ErasureCodingPolicy) o;
 
-    if (that.getName().equals(name) && that.getCellSize() == cellSize
-        && that.getSchema().equals(schema)) {
-      return true;
-    }
-    return false;
+    return that.getName().equals(name) &&
+        that.getCellSize() == cellSize &&
+        that.getSchema().equals(schema);
   }
 
   @Override
@@ -83,11 +79,8 @@ public final class ErasureCodingPolicy {
 
   @Override
   public String toString() {
-    StringBuilder sb = new StringBuilder("ErasureCodingPolicy=[");
-    sb.append("Name=" + name + ", ");
-    sb.append("Schema=[" + schema.toString() + "], ");
-    sb.append("CellSize=" + cellSize + " ");
-    sb.append("]");
-    return sb.toString();
+    return "ErasureCodingPolicy=[" + "Name=" + name + ", "
+        + "Schema=[" + schema.toString() + "], "
+        + "CellSize=" + cellSize + " " + "]";
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 6e05ce0..0b5b836 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -33,7 +33,8 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 @InterfaceStability.Evolving
 public class HdfsFileStatus {
 
-  private final byte[] path;  // local name of the inode that's encoded in java UTF8
+  // local name of the inode that's encoded in java UTF8
+  private final byte[] path;
   private final byte[] symlink; // symlink target encoded in java UTF8 or null
   private final long length;
   private final boolean isdir;
@@ -49,7 +50,7 @@ public class HdfsFileStatus {
   private final FileEncryptionInfo feInfo;
 
   private final ErasureCodingPolicy ecPolicy;
-  
+
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
   private final byte storagePolicy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 2121dcf..0fd2039 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 
-/** 
+/**
  * Interface that represents the over the wire information
  * including block locations for a file.
  */
@@ -38,7 +38,7 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
 
   /**
    * Constructor
-   * 
+   *
    * @param length size
    * @param isdir if this is directory
    * @param block_replication the file's replication factor
@@ -49,7 +49,7 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    * @param owner owner
    * @param group group
    * @param symlink symbolic link
-   * @param path local path name in java UTF8 format 
+   * @param path local path name in java UTF8 format
    * @param fileId the file id
    * @param locations block locations
    * @param feInfo file encryption info

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
index 1cd80f9..b9c8b96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
@@ -31,7 +31,8 @@ public class LastBlockWithStatus {
 
   private final HdfsFileStatus fileStatus;
 
-  public LastBlockWithStatus(LocatedBlock lastBlock, HdfsFileStatus fileStatus) {
+  public LastBlockWithStatus(LocatedBlock lastBlock,
+      HdfsFileStatus fileStatus) {
     this.lastBlock = lastBlock;
     this.fileStatus = fileStatus;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index a9596bf..554d113 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -65,13 +65,13 @@ public class LocatedBlock {
   }
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
-                      String[] storageIDs, StorageType[] storageTypes) {
+      String[] storageIDs, StorageType[] storageTypes) {
     this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
   }
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
-                      StorageType[] storageTypes, long startOffset,
-                      boolean corrupt, DatanodeInfo[] cachedLocs) {
+      StorageType[] storageTypes, long startOffset,
+      boolean corrupt, DatanodeInfo[] cachedLocs) {
     this.b = b;
     this.offset = startOffset;
     this.corrupt = corrupt;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index 6e01bbe..4dbaa01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 @InterfaceStability.Evolving
 public class LocatedBlocks {
   private final long fileLength;
-  private final List<LocatedBlock> blocks; // array of blocks with prioritized locations
+  // array of blocks with prioritized locations
+  private final List<LocatedBlock> blocks;
   private final boolean underConstruction;
   private final LocatedBlock lastLocatedBlock;
   private final boolean isLastBlockComplete;
@@ -134,22 +135,22 @@ public class LocatedBlocks {
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =
-      new Comparator<LocatedBlock>() {
-        // Returns 0 iff a is inside b or b is inside a
-        @Override
-        public int compare(LocatedBlock a, LocatedBlock b) {
-          long aBeg = a.getStartOffset();
-          long bBeg = b.getStartOffset();
-          long aEnd = aBeg + a.getBlockSize();
-          long bEnd = bBeg + b.getBlockSize();
-          if(aBeg <= bBeg && bEnd <= aEnd
-              || bBeg <= aBeg && aEnd <= bEnd)
-            return 0; // one of the blocks is inside the other
-          if(aBeg < bBeg)
-            return -1; // a's left bound is to the left of the b's
-          return 1;
-        }
-      };
+        new Comparator<LocatedBlock>() {
+          // Returns 0 iff a is inside b or b is inside a
+          @Override
+          public int compare(LocatedBlock a, LocatedBlock b) {
+            long aBeg = a.getStartOffset();
+            long bBeg = b.getStartOffset();
+            long aEnd = aBeg + a.getBlockSize();
+            long bEnd = bBeg + b.getBlockSize();
+            if(aBeg <= bBeg && bEnd <= aEnd
+                || bBeg <= aBeg && aEnd <= bEnd)
+              return 0; // one of the blocks is inside the other
+            if(aBeg < bBeg)
+              return -1; // a's left bound is to the left of the b's
+            return 1;
+          }
+        };
     return Collections.binarySearch(blocks, key, comp);
   }
 
@@ -187,14 +188,10 @@ public class LocatedBlocks {
 
   @Override
   public String toString() {
-    final StringBuilder b = new StringBuilder(getClass().getSimpleName());
-    b.append("{")
-     .append("\n  fileLength=").append(fileLength)
-     .append("\n  underConstruction=").append(underConstruction)
-     .append("\n  blocks=").append(blocks)
-     .append("\n  lastLocatedBlock=").append(lastLocatedBlock)
-     .append("\n  isLastBlockComplete=").append(isLastBlockComplete)
-     .append("}");
-    return b.toString();
+    return getClass().getSimpleName() + "{" + "\n  fileLength=" + fileLength
+        + "\n  underConstruction=" + underConstruction
+        + "\n  blocks=" + blocks
+        + "\n  lastLocatedBlock=" + lastLocatedBlock
+        + "\n  isLastBlockComplete=" + isLastBlockComplete + "}";
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
index a9a80c2..e21df59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -39,6 +39,7 @@ public class LocatedStripedBlock extends LocatedBlock {
   private int[] blockIndices;
   private Token<BlockTokenIdentifier>[] blockTokens;
 
+  @SuppressWarnings({"unchecked"})
   public LocatedStripedBlock(ExtendedBlock b, DatanodeInfo[] locs,
       String[] storageIDs, StorageType[] storageTypes, int[] indices,
       long startOffset, boolean corrupt, DatanodeInfo[] cachedLocs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
index eeedd5a..68ac58e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
@@ -43,7 +43,7 @@ public final class NSQuotaExceededException extends QuotaExceededException {
     String msg = super.getMessage();
     if (msg == null) {
       msg = "The NameSpace quota (directories and files)" +
-      (pathName==null?"":(" of directory " + pathName)) +
+          (pathName==null?"":(" of directory " + pathName)) +
           " is exceeded: quota=" + quota + " file count=" + count;
 
       if (prefix != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
index b527797..b7a7e98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
@@ -64,7 +64,6 @@ public class RollingUpgradeInfo extends RollingUpgradeStatus {
 
   /**
    * Finalize the upgrade if not already finalized
-   * @param finalizeTime
    */
   public void finalize(long finalizeTime) {
     if (finalizeTime != 0) {
@@ -99,8 +98,11 @@ public class RollingUpgradeInfo extends RollingUpgradeStatus {
   @Override
   public String toString() {
     return super.toString()
-      +  "\n     Start Time: " + (startTime == 0? "<NOT STARTED>": timestamp2String(startTime))
-      +  "\n  Finalize Time: " + (finalizeTime == 0? "<NOT FINALIZED>": timestamp2String(finalizeTime));
+        +  "\n     Start Time: "
+        + (startTime == 0 ? "<NOT STARTED>" : timestamp2String(startTime))
+        +  "\n  Finalize Time: "
+        + (finalizeTime == 0 ? "<NOT FINALIZED>" :
+        timestamp2String(finalizeTime));
   }
 
   private static String timestamp2String(long timestamp) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
index b58ed36..be3b94d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
@@ -27,7 +27,7 @@ import com.google.common.base.Objects;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 
 /**
- * This class represents to end users the difference between two snapshots of 
+ * This class represents to end users the difference between two snapshots of
  * the same directory, or the difference between a snapshot of the directory and
  * its current state. Instead of capturing all the details of the diff, this
  * class only lists where the changes happened and their types.
@@ -42,21 +42,21 @@ public class SnapshotDiffReport {
    * DELETE, and RENAME respectively.
    */
   public enum DiffType {
-    CREATE("+"),     
-    MODIFY("M"),    
-    DELETE("-"), 
+    CREATE("+"),
+    MODIFY("M"),
+    DELETE("-"),
     RENAME("R");
-    
+
     private final String label;
-    
-    private DiffType(String label) {
+
+    DiffType(String label) {
       this.label = label;
     }
-    
+
     public String getLabel() {
       return label;
     }
-    
+
     public static DiffType getTypeFromLabel(String label) {
       if (label.equals(CREATE.getLabel())) {
         return CREATE;
@@ -69,8 +69,8 @@ public class SnapshotDiffReport {
       }
       return null;
     }
-  };
-  
+  }
+
   /**
    * Representing the full path and diff type of a file/directory where changes
    * have happened.
@@ -98,7 +98,7 @@ public class SnapshotDiffReport {
       this.sourcePath = sourcePath;
       this.targetPath = targetPath;
     }
-    
+
     public DiffReportEntry(DiffType type, byte[][] sourcePathComponents,
         byte[][] targetPathComponents) {
       this.type = type;
@@ -106,7 +106,7 @@ public class SnapshotDiffReport {
       this.targetPath = targetPathComponents == null ? null : DFSUtilClient
           .byteArray2bytes(targetPathComponents);
     }
-    
+
     @Override
     public String toString() {
       String str = type.getLabel() + "\t" + getPathString(sourcePath);
@@ -115,7 +115,7 @@ public class SnapshotDiffReport {
       }
       return str;
     }
-    
+
     public DiffType getType() {
       return type;
     }
@@ -141,7 +141,7 @@ public class SnapshotDiffReport {
     public boolean equals(Object other) {
       if (this == other) {
         return true;
-      } 
+      }
       if (other != null && other instanceof DiffReportEntry) {
         DiffReportEntry entry = (DiffReportEntry) other;
         return type.equals(entry.getType())
@@ -150,25 +150,25 @@ public class SnapshotDiffReport {
       }
       return false;
     }
-    
+
     @Override
     public int hashCode() {
       return Objects.hashCode(getSourcePath(), getTargetPath());
     }
   }
-  
+
   /** snapshot root full path */
   private final String snapshotRoot;
 
   /** start point of the diff */
   private final String fromSnapshot;
-  
+
   /** end point of the diff */
   private final String toSnapshot;
-  
+
   /** list of diff */
   private final List<DiffReportEntry> diffList;
-  
+
   public SnapshotDiffReport(String snapshotRoot, String fromSnapshot,
       String toSnapshot, List<DiffReportEntry> entryList) {
     this.snapshotRoot = snapshotRoot;
@@ -177,7 +177,7 @@ public class SnapshotDiffReport {
     this.diffList = entryList != null ? entryList : Collections
         .<DiffReportEntry> emptyList();
   }
-  
+
   /** @return {@link #snapshotRoot}*/
   public String getSnapshotRoot() {
     return snapshotRoot;
@@ -192,23 +192,24 @@ public class SnapshotDiffReport {
   public String getLaterSnapshotName() {
     return toSnapshot;
   }
-  
+
   /** @return {@link #diffList} */
   public List<DiffReportEntry> getDiffList() {
     return diffList;
   }
-  
+
   @Override
   public String toString() {
     StringBuilder str = new StringBuilder();
-    String from = fromSnapshot == null || fromSnapshot.isEmpty() ? 
+    String from = fromSnapshot == null || fromSnapshot.isEmpty() ?
         "current directory" : "snapshot " + fromSnapshot;
     String to = toSnapshot == null || toSnapshot.isEmpty() ? "current directory"
         : "snapshot " + toSnapshot;
-    str.append("Difference between " + from + " and " + to
-        + " under directory " + snapshotRoot + ":" + LINE_SEPARATOR);
+    str.append("Difference between ").append(from).append(" and ").append(to)
+        .append(" under directory ").append(snapshotRoot).append(":")
+        .append(LINE_SEPARATOR);
     for (DiffReportEntry entry : diffList) {
-      str.append(entry.toString() + LINE_SEPARATOR);
+      str.append(entry.toString()).append(LINE_SEPARATOR);
     }
     return str.toString();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 813ea26..583d027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -31,37 +31,39 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
  */
 public class SnapshottableDirectoryStatus {
   /** Compare the statuses by full paths. */
-  public static final Comparator<SnapshottableDirectoryStatus> COMPARATOR
-      = new Comparator<SnapshottableDirectoryStatus>() {
-    @Override
-    public int compare(SnapshottableDirectoryStatus left,
-                       SnapshottableDirectoryStatus right) {
-      int d = DFSUtilClient.compareBytes(left.parentFullPath, right.parentFullPath);
-      return d != 0? d
-          : DFSUtilClient.compareBytes(left.dirStatus.getLocalNameInBytes(),
+  public static final Comparator<SnapshottableDirectoryStatus> COMPARATOR =
+      new Comparator<SnapshottableDirectoryStatus>() {
+        @Override
+        public int compare(SnapshottableDirectoryStatus left,
+            SnapshottableDirectoryStatus right) {
+          int d = DFSUtilClient.compareBytes(left.parentFullPath,
+              right.parentFullPath);
+          return d != 0? d
+              : DFSUtilClient.compareBytes(left.dirStatus.getLocalNameInBytes(),
               right.dirStatus.getLocalNameInBytes());
-    }
-  };
+        }
+      };
 
   /** Basic information of the snapshottable directory */
   private final HdfsFileStatus dirStatus;
-  
+
   /** Number of snapshots that have been taken*/
   private final int snapshotNumber;
-  
+
   /** Number of snapshots allowed. */
   private final int snapshotQuota;
-  
+
   /** Full path of the parent. */
   private final byte[] parentFullPath;
-  
+
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] localName,
       long inodeId, int childrenNum,
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
+        null);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
@@ -80,7 +82,7 @@ public class SnapshottableDirectoryStatus {
   public int getSnapshotQuota() {
     return snapshotQuota;
   }
-  
+
   /**
    * @return Full path of the parent
    */
@@ -94,13 +96,13 @@ public class SnapshottableDirectoryStatus {
   public HdfsFileStatus getDirStatus() {
     return dirStatus;
   }
-  
+
   /**
    * @return Full path of the file
    */
   public Path getFullPath() {
-    String parentFullPathStr = 
-        (parentFullPath == null || parentFullPath.length == 0) ? 
+    String parentFullPathStr =
+        (parentFullPath == null || parentFullPath.length == 0) ?
             null : DFSUtilClient.bytes2String(parentFullPath);
     if (parentFullPathStr == null
         && dirStatus.getLocalNameInBytes().length == 0) {
@@ -111,13 +113,13 @@ public class SnapshottableDirectoryStatus {
           : new Path(parentFullPathStr, dirStatus.getLocalName());
     }
   }
-  
+
   /**
    * Print a list of {@link SnapshottableDirectoryStatus} out to a given stream.
    * @param stats The list of {@link SnapshottableDirectoryStatus}
    * @param out The given stream for printing.
    */
-  public static void print(SnapshottableDirectoryStatus[] stats, 
+  public static void print(SnapshottableDirectoryStatus[] stats,
       PrintStream out) {
     if (stats == null || stats.length == 0) {
       out.println();
@@ -133,30 +135,28 @@ public class SnapshottableDirectoryStatus {
       maxSnapshotNum = maxLength(maxSnapshotNum, status.snapshotNumber);
       maxSnapshotQuota = maxLength(maxSnapshotQuota, status.snapshotQuota);
     }
-    
-    StringBuilder fmt = new StringBuilder();
-    fmt.append("%s%s "); // permission string
-    fmt.append("%"  + maxRepl  + "s ");
-    fmt.append((maxOwner > 0) ? "%-" + maxOwner + "s " : "%s");
-    fmt.append((maxGroup > 0) ? "%-" + maxGroup + "s " : "%s");
-    fmt.append("%"  + maxLen   + "s ");
-    fmt.append("%s "); // mod time
-    fmt.append("%"  + maxSnapshotNum  + "s ");
-    fmt.append("%"  + maxSnapshotQuota  + "s ");
-    fmt.append("%s"); // path
-    
-    String lineFormat = fmt.toString();
+
+    String lineFormat = "%s%s " // permission string
+        + "%"  + maxRepl  + "s "
+        + (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
+        + (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
+        + "%"  + maxLen   + "s "
+        + "%s " // mod time
+        + "%"  + maxSnapshotNum  + "s "
+        + "%"  + maxSnapshotQuota  + "s "
+        + "%s"; // path
+
     SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
-         
+
     for (SnapshottableDirectoryStatus status : stats) {
-      String line = String.format(lineFormat, "d", 
+      String line = String.format(lineFormat, "d",
           status.dirStatus.getPermission(),
           status.dirStatus.getReplication(),
           status.dirStatus.getOwner(),
           status.dirStatus.getGroup(),
           String.valueOf(status.dirStatus.getLen()),
           dateFormat.format(new Date(status.dirStatus.getModificationTime())),
-          status.snapshotNumber, status.snapshotQuota, 
+          status.snapshotNumber, status.snapshotQuota,
           status.getFullPath().toString()
       );
       out.println(line);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java
index 03fb704..fc1d3d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java
@@ -18,14 +18,12 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Path;
 
-/** 
+/**
  * Thrown when a symbolic link is encountered in a path.
  */
 @InterfaceAudience.Private
@@ -43,7 +41,7 @@ public final class UnresolvedPathException extends UnresolvedLinkException {
   public UnresolvedPathException(String msg) {
     super(msg);
   }
-  
+
   public UnresolvedPathException(String path, String preceding,
       String remainder, String linkTarget) {
     this.path = path;
@@ -55,7 +53,7 @@ public final class UnresolvedPathException extends UnresolvedLinkException {
   /**
    * Return a path with the link resolved with the target.
    */
-  public Path getResolvedPath() throws IOException {
+  public Path getResolvedPath() {
     // If the path is absolute we cam throw out the preceding part and
     // just append the remainder to the target, otherwise append each
     // piece to resolve the link in path.
@@ -76,12 +74,6 @@ public final class UnresolvedPathException extends UnresolvedLinkException {
     if (msg != null) {
       return msg;
     }
-    String myMsg = "Unresolved path " + path;
-    try {
-      return getResolvedPath().toString();
-    } catch (IOException e) {
-      // Ignore
-    }
-    return myMsg;
+    return getResolvedPath().toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
index 5f86e52..36f5fd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Evolving
 public enum BlockConstructionStage {
   /** The enumerates are always listed as regular stage followed by the
-   * recovery stage. 
+   * recovery stage.
    * Changing this order will make getRecoveryStage not working.
    */
   // pipeline set up for block append
@@ -46,9 +46,9 @@ public enum BlockConstructionStage {
   TRANSFER_RBW,
   // transfer Finalized for adding datanodes
   TRANSFER_FINALIZED;
-  
+
   final static private byte RECOVERY_BIT = (byte)1;
-  
+
   /**
    * get the recovery stage of this stage
    */
@@ -59,4 +59,4 @@ public enum BlockConstructionStage {
       return values()[ordinal()|RECOVERY_BIT];
     }
   }
-}    
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
index e585328..6801149 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
@@ -65,7 +65,9 @@ public abstract class DataTransferProtoUtil {
   }
 
   public static DataChecksum fromProto(ChecksumProto proto) {
-    if (proto == null) return null;
+    if (proto == null) {
+      return null;
+    }
 
     int bytesPerChecksum = proto.getBytesPerChecksum();
     DataChecksum.Type type = PBHelperClient.convert(proto.getType());
@@ -74,19 +76,17 @@ public abstract class DataTransferProtoUtil {
 
   static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
       String client, Token<BlockTokenIdentifier> blockToken) {
-    ClientOperationHeaderProto header =
-      ClientOperationHeaderProto.newBuilder()
-        .setBaseHeader(buildBaseHeader(blk, blockToken))
-        .setClientName(client)
-        .build();
-    return header;
+    return ClientOperationHeaderProto.newBuilder()
+      .setBaseHeader(buildBaseHeader(blk, blockToken))
+      .setClientName(client)
+      .build();
   }
 
   static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
       Token<BlockTokenIdentifier> blockToken) {
     BaseHeaderProto.Builder builder =  BaseHeaderProto.newBuilder()
-      .setBlock(PBHelperClient.convert(blk))
-      .setToken(PBHelperClient.convert(blockToken));
+        .setBlock(PBHelperClient.convert(blk))
+        .setToken(PBHelperClient.convert(blockToken));
     SpanId spanId = Tracer.getCurrentSpanId();
     if (spanId.isValid()) {
       builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
index 1f7e378..4aa545b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
@@ -39,21 +39,21 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public interface DataTransferProtocol {
-  public static final Logger LOG = LoggerFactory.getLogger(DataTransferProtocol.class);
-  
+  Logger LOG = LoggerFactory.getLogger(DataTransferProtocol.class);
+
   /** Version for data transfers between clients and datanodes
    * This should change when serialization of DatanodeInfo, not just
-   * when protocol changes. It is not very obvious. 
+   * when protocol changes. It is not very obvious.
    */
   /*
    * Version 28:
    *    Declare methods in DataTransferProtocol interface.
    */
-  public static final int DATA_TRANSFER_VERSION = 28;
+  int DATA_TRANSFER_VERSION = 28;
 
-  /** 
+  /**
    * Read a block.
-   * 
+   *
    * @param blk the block being read.
    * @param blockToken security token for accessing the block.
    * @param clientName client's name.
@@ -63,7 +63,7 @@ public interface DataTransferProtocol {
    *        checksums
    * @param cachingStrategy  The caching strategy to use.
    */
-  public void readBlock(final ExtendedBlock blk,
+  void readBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final long blockOffset,
@@ -77,7 +77,7 @@ public interface DataTransferProtocol {
    * The other downstream datanodes are specified by the targets parameter.
    * Note that the receiver {@link DatanodeInfo} is not required in the
    * parameter list since the receiver datanode knows its info.  However, the
-   * {@link StorageType} for storing the replica in the receiver datanode is a 
+   * {@link StorageType} for storing the replica in the receiver datanode is a
    * parameter since the receiver datanode may support multiple storage types.
    *
    * @param blk the block being written.
@@ -96,12 +96,12 @@ public interface DataTransferProtocol {
    * @param pinning whether to pin the block, so Balancer won't move it.
    * @param targetPinnings whether to pin the block on target datanode
    */
-  public void writeBlock(final ExtendedBlock blk,
-      final StorageType storageType, 
+  void writeBlock(final ExtendedBlock blk,
+      final StorageType storageType,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final DatanodeInfo[] targets,
-      final StorageType[] targetStorageTypes, 
+      final StorageType[] targetStorageTypes,
       final DatanodeInfo source,
       final BlockConstructionStage stage,
       final int pipelineSize,
@@ -118,13 +118,13 @@ public interface DataTransferProtocol {
    * The block stage must be
    * either {@link BlockConstructionStage#TRANSFER_RBW}
    * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
-   * 
+   *
    * @param blk the block being transferred.
    * @param blockToken security token for accessing the block.
    * @param clientName client's name.
    * @param targets target datanodes.
    */
-  public void transferBlock(final ExtendedBlock blk,
+  void transferBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final DatanodeInfo[] targets,
@@ -135,14 +135,14 @@ public interface DataTransferProtocol {
    *
    * @param blk             The block to get file descriptors for.
    * @param blockToken      Security token for accessing the block.
-   * @param slotId          The shared memory slot id to use, or null 
+   * @param slotId          The shared memory slot id to use, or null
    *                          to use no slot id.
-   * @param maxVersion      Maximum version of the block data the client 
+   * @param maxVersion      Maximum version of the block data the client
    *                          can understand.
    * @param supportsReceiptVerification  True if the client supports
    *                          receipt verification.
    */
-  public void requestShortCircuitFds(final ExtendedBlock blk,
+  void requestShortCircuitFds(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
         throws IOException;
@@ -152,51 +152,51 @@ public interface DataTransferProtocol {
    *
    * @param slotId          SlotID used by the earlier file descriptors.
    */
-  public void releaseShortCircuitFds(final SlotId slotId) throws IOException;
+  void releaseShortCircuitFds(final SlotId slotId) throws IOException;
 
   /**
    * Request a short circuit shared memory area from a DataNode.
-   * 
+   *
    * @param clientName       The name of the client.
    */
-  public void requestShortCircuitShm(String clientName) throws IOException;
-  
+  void requestShortCircuitShm(String clientName) throws IOException;
+
   /**
    * Receive a block from a source datanode
    * and then notifies the namenode
    * to remove the copy from the original datanode.
    * Note that the source datanode and the original datanode can be different.
    * It is used for balancing purpose.
-   * 
+   *
    * @param blk the block being replaced.
    * @param storageType the {@link StorageType} for storing the block.
    * @param blockToken security token for accessing the block.
    * @param delHint the hint for deleting the block in the original datanode.
    * @param source the source datanode for receiving the block.
    */
-  public void replaceBlock(final ExtendedBlock blk,
-      final StorageType storageType, 
+  void replaceBlock(final ExtendedBlock blk,
+      final StorageType storageType,
       final Token<BlockTokenIdentifier> blockToken,
       final String delHint,
       final DatanodeInfo source) throws IOException;
 
   /**
-   * Copy a block. 
+   * Copy a block.
    * It is used for balancing purpose.
-   * 
+   *
    * @param blk the block being copied.
    * @param blockToken security token for accessing the block.
    */
-  public void copyBlock(final ExtendedBlock blk,
+  void copyBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken) throws IOException;
 
   /**
    * Get block checksum (MD5 of CRC32).
-   * 
+   *
    * @param blk a block.
    * @param blockToken security token for accessing the block.
    * @throws IOException
    */
-  public void blockChecksum(final ExtendedBlock blk,
+  void blockChecksum(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
index 23407f8..4157a30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class IOStreamPair {
   public final InputStream in;
   public final OutputStream out;
-  
+
   public IOStreamPair(InputStream in, OutputStream out) {
     this.in = in;
     this.out = out;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
index 3077498..511574c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
@@ -42,11 +42,11 @@ public enum Op {
 
   /** The code for this operation. */
   public final byte code;
-  
-  private Op(byte code) {
+
+  Op(byte code) {
     this.code = code;
   }
-  
+
   private static final int FIRST_CODE = values()[0].code;
   /** Return the object represented by the code. */
   private static Op valueOf(byte code) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
index c9966a7..dbf5e80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
@@ -36,14 +36,14 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * Header data for each packet that goes through the read/write pipelines.
  * Includes all of the information about the packet, excluding checksums and
  * actual data.
- * 
+ *
  * This data includes:
  *  - the offset in bytes into the HDFS block of the data in this packet
  *  - the sequence number of this packet in the pipeline
  *  - whether or not this is the last packet in the pipeline
  *  - the length of the data in this packet
  *  - whether or not this packet should be synced by the DNs.
- *  
+ *
  * When serialized, this header is written out as a protocol buffer, preceded
  * by a 4-byte integer representing the full packet length, and a 2-byte short
  * representing the header length.
@@ -51,8 +51,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class PacketHeader {
-  private static final int MAX_PROTO_SIZE = 
-    PacketHeaderProto.newBuilder()
+  private static final int MAX_PROTO_SIZE = PacketHeaderProto.newBuilder()
       .setOffsetInBlock(0)
       .setSeqno(0)
       .setLastPacketInBlock(false)
@@ -76,21 +75,21 @@ public class PacketHeader {
     Preconditions.checkArgument(packetLen >= Ints.BYTES,
         "packet len %s should always be at least 4 bytes",
         packetLen);
-    
+
     PacketHeaderProto.Builder builder = PacketHeaderProto.newBuilder()
-      .setOffsetInBlock(offsetInBlock)
-      .setSeqno(seqno)
-      .setLastPacketInBlock(lastPacketInBlock)
-      .setDataLen(dataLen);
-      
+        .setOffsetInBlock(offsetInBlock)
+        .setSeqno(seqno)
+        .setLastPacketInBlock(lastPacketInBlock)
+        .setDataLen(dataLen);
+
     if (syncBlock) {
       // Only set syncBlock if it is specified.
       // This is wire-incompatible with Hadoop 2.0.0-alpha due to HDFS-3721
       // because it changes the length of the packet header, and BlockReceiver
       // in that version did not support variable-length headers.
-      builder.setSyncBlock(syncBlock);
+      builder.setSyncBlock(true);
     }
-      
+
     proto = builder.build();
   }
 
@@ -121,16 +120,16 @@ public class PacketHeader {
   @Override
   public String toString() {
     return "PacketHeader with packetLen=" + packetLen +
-      " header data: " + 
+      " header data: " +
       proto.toString();
   }
-  
+
   public void setFieldsFromData(
       int packetLen, byte[] headerData) throws InvalidProtocolBufferException {
     this.packetLen = packetLen;
     proto = PacketHeaderProto.parseFrom(headerData);
   }
-  
+
   public void readFields(ByteBuffer buf) throws IOException {
     packetLen = buf.getInt();
     short protoLen = buf.getShort();
@@ -138,7 +137,7 @@ public class PacketHeader {
     buf.get(data);
     proto = PacketHeaderProto.parseFrom(data);
   }
-  
+
   public void readFields(DataInputStream in) throws IOException {
     this.packetLen = in.readInt();
     short protoLen = in.readShort();
@@ -170,7 +169,7 @@ public class PacketHeader {
       throw new RuntimeException(e);
     }
   }
-  
+
   public void write(DataOutputStream out) throws IOException {
     assert proto.getSerializedSize() <= MAX_PROTO_SIZE
     : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize();
@@ -178,7 +177,7 @@ public class PacketHeader {
     out.writeShort(proto.getSerializedSize());
     proto.writeTo(out);
   }
-  
+
   public byte[] getBytes() {
     ByteBuffer buf = ByteBuffer.allocate(getSerializedSize());
     putInBuffer(buf);
@@ -187,8 +186,8 @@ public class PacketHeader {
 
   /**
    * Perform a sanity check on the packet, returning true if it is sane.
-   * @param lastSeqNo the previous sequence number received - we expect the current
-   * sequence number to be larger by 1.
+   * @param lastSeqNo the previous sequence number received - we expect the
+   *                  current sequence number to be larger by 1.
    */
   public boolean sanityCheck(long lastSeqNo) {
     // We should only have a non-positive data length for the last packet
@@ -196,8 +195,7 @@ public class PacketHeader {
     // The last packet should not contain data
     if (proto.getLastPacketInBlock() && proto.getDataLen() != 0) return false;
     // Seqnos should always increase by 1 with each packet received
-    if (proto.getSeqno() != lastSeqNo + 1) return false;
-    return true;
+    return proto.getSeqno() == lastSeqNo + 1;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
index e6709d9..93e9217 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -48,7 +48,7 @@ public class PacketReceiver implements Closeable {
   private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
 
   static final Logger LOG = LoggerFactory.getLogger(PacketReceiver.class);
-  
+
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
   private final boolean useDirectBuffers;
 
@@ -58,12 +58,12 @@ public class PacketReceiver implements Closeable {
    * length prefixes.
    */
   private ByteBuffer curPacketBuf = null;
-  
+
   /**
    * A slice of {@link #curPacketBuf} which contains just the checksums.
    */
   private ByteBuffer curChecksumSlice = null;
-  
+
   /**
    * A slice of {@link #curPacketBuf} which contains just the data.
    */
@@ -73,7 +73,7 @@ public class PacketReceiver implements Closeable {
    * The packet header of the most recently read packet.
    */
   private PacketHeader curHeader;
-  
+
   public PacketReceiver(boolean useDirectBuffers) {
     this.useDirectBuffers = useDirectBuffers;
     reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN);
@@ -86,14 +86,14 @@ public class PacketReceiver implements Closeable {
   public ByteBuffer getDataSlice() {
     return curDataSlice;
   }
-  
+
   public ByteBuffer getChecksumSlice() {
     return curChecksumSlice;
   }
 
   /**
    * Reads all of the data for the next packet into the appropriate buffers.
-   * 
+   *
    * The data slice and checksum slice members will be set to point to the
    * user data and corresponding checksums. The header will be parsed and
    * set.
@@ -134,7 +134,7 @@ public class PacketReceiver implements Closeable {
     doReadFully(ch, in, curPacketBuf);
     curPacketBuf.flip();
     int payloadLen = curPacketBuf.getInt();
-    
+
     if (payloadLen < Ints.BYTES) {
       // The "payload length" includes its own length. Therefore it
       // should never be less than 4 bytes
@@ -146,7 +146,7 @@ public class PacketReceiver implements Closeable {
     if (headerLen < 0) {
       throw new IOException("Invalid header length " + headerLen);
     }
-    
+
     LOG.trace("readNextPacket: dataPlusChecksumLen={}, headerLen={}",
         dataPlusChecksumLen, headerLen);
 
@@ -177,18 +177,18 @@ public class PacketReceiver implements Closeable {
       curHeader = new PacketHeader();
     }
     curHeader.setFieldsFromData(payloadLen, headerBuf);
-    
+
     // Compute the sub-slices of the packet
     int checksumLen = dataPlusChecksumLen - curHeader.getDataLen();
     if (checksumLen < 0) {
-      throw new IOException("Invalid packet: data length in packet header " + 
+      throw new IOException("Invalid packet: data length in packet header " +
           "exceeds data length received. dataPlusChecksumLen=" +
-          dataPlusChecksumLen + " header: " + curHeader); 
+          dataPlusChecksumLen + " header: " + curHeader);
     }
-    
+
     reslicePacket(headerLen, checksumLen, curHeader.getDataLen());
   }
-  
+
   /**
    * Rewrite the last-read packet on the wire to the given output stream.
    */
@@ -200,7 +200,7 @@ public class PacketReceiver implements Closeable {
         curPacketBuf.remaining());
   }
 
-  
+
   private static void doReadFully(ReadableByteChannel ch, InputStream in,
       ByteBuffer buf) throws IOException {
     if (ch != null) {
@@ -222,7 +222,7 @@ public class PacketReceiver implements Closeable {
     //   32-bit  16-bit   <protobuf>  <variable length>
     //   |--- lenThroughHeader ----|
     //   |----------- lenThroughChecksums   ----|
-    //   |------------------- lenThroughData    ------| 
+    //   |------------------- lenThroughData    ------|
     int lenThroughHeader = PacketHeader.PKT_LENGTHS_LEN + headerLen;
     int lenThroughChecksums = lenThroughHeader + checksumsLen;
     int lenThroughData = lenThroughChecksums + dataLen;
@@ -242,14 +242,14 @@ public class PacketReceiver implements Closeable {
     curPacketBuf.position(lenThroughChecksums);
     curPacketBuf.limit(lenThroughData);
     curDataSlice = curPacketBuf.slice();
-    
+
     // Reset buffer to point to the entirety of the packet (including
     // length prefixes)
     curPacketBuf.position(0);
     curPacketBuf.limit(lenThroughData);
   }
 
-  
+
   private static void readChannelFully(ReadableByteChannel ch, ByteBuffer buf)
       throws IOException {
     while (buf.remaining() > 0) {
@@ -259,7 +259,7 @@ public class PacketReceiver implements Closeable {
       }
     }
   }
-  
+
   private void reallocPacketBuf(int atLeastCapacity) {
     // Realloc the buffer if this packet is longer than the previous
     // one.
@@ -277,12 +277,12 @@ public class PacketReceiver implements Closeable {
         curPacketBuf.flip();
         newBuf.put(curPacketBuf);
       }
-      
+
       returnPacketBufToPool();
       curPacketBuf = newBuf;
     }
   }
-  
+
   private void returnPacketBufToPool() {
     if (curPacketBuf != null && curPacketBuf.isDirect()) {
       bufferPool.returnBuffer(curPacketBuf);
@@ -294,7 +294,7 @@ public class PacketReceiver implements Closeable {
   public void close() {
     returnPacketBufToPool();
   }
-  
+
   @Override
   protected void finalize() throws Throwable {
     try {

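For readers following the PacketReceiver hunks above, the interesting mechanics sit in reslicePacket(): a single backing buffer holds the length prefixes, header, checksums and data, and the checksum/data views are carved out with position/limit/slice rather than copied. Below is a minimal standalone sketch of that pattern; the class name and the hard-coded lengths are hypothetical, not taken from the HDFS source.

import java.nio.ByteBuffer;

public class SliceSketch {
  public static void main(String[] args) {
    // Hypothetical lengths; PacketReceiver derives them from the packet header.
    int prefixLen = 6;        // 32-bit payload length + 16-bit header length
    int headerLen = 10;
    int checksumsLen = 8;
    int dataLen = 16;

    ByteBuffer packet = ByteBuffer.allocate(
        prefixLen + headerLen + checksumsLen + dataLen);

    int lenThroughHeader = prefixLen + headerLen;
    int lenThroughChecksums = lenThroughHeader + checksumsLen;
    int lenThroughData = lenThroughChecksums + dataLen;

    // Checksum slice: bytes [lenThroughHeader, lenThroughChecksums).
    packet.position(lenThroughHeader);
    packet.limit(lenThroughChecksums);
    ByteBuffer checksumSlice = packet.slice();

    // Data slice: bytes [lenThroughChecksums, lenThroughData).
    packet.position(lenThroughChecksums);
    packet.limit(lenThroughData);
    ByteBuffer dataSlice = packet.slice();

    // Reset the backing buffer to cover the whole packet again
    // (including the length prefixes), as the end of reslicePacket() does.
    packet.position(0);
    packet.limit(lenThroughData);

    System.out.println("checksums=" + checksumSlice.remaining()
        + " data=" + dataSlice.remaining()
        + " total=" + packet.remaining());
  }
}

Because the slices share the one backing buffer, the whole packet can later be written back out to another stream without reassembling it from pieces.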
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
index 3836606..be822d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
@@ -94,7 +94,7 @@ public class PipelineAck {
   /** default constructor **/
   public PipelineAck() {
   }
-  
+
   /**
    * Constructor assuming no next DN in pipeline
    * @param seqno sequence number
@@ -125,7 +125,7 @@ public class PipelineAck {
       .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
       .build();
   }
-  
+
   /**
    * Get the sequence number
    * @return the sequence number
@@ -133,7 +133,7 @@ public class PipelineAck {
   public long getSeqno() {
     return proto.getSeqno();
   }
-  
+
   /**
    * Get the number of replies
    * @return the number of replies
@@ -141,7 +141,7 @@ public class PipelineAck {
   public short getNumOfReplies() {
     return (short)proto.getReplyCount();
   }
-  
+
   /**
    * get the header flag of ith reply
    */
@@ -179,7 +179,7 @@ public class PipelineAck {
   }
 
   /**
-   * Returns the OOB status if this ack contains one. 
+   * Returns the OOB status if this ack contains one.
    * @return null if it is not an OOB ack.
    */
   public Status getOOBStatus() {
@@ -216,7 +216,7 @@ public class PipelineAck {
   public void write(OutputStream out) throws IOException {
     proto.writeDelimitedTo(out);
   }
-  
+
   @Override //Object
   public String toString() {
     return TextFormat.shortDebugString(proto);
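The PipelineAck changes above are whitespace-only, but the hunks make the design visible: the class is a thin wrapper over a protobuf message, and write() frames it with writeDelimitedTo(), i.e. a varint length prefix followed by the serialized bytes so a reader knows where one ack ends and the next begins. A small round-trip sketch of that framing, using the stock Int64Value wrapper (protobuf-java 3.x) purely as a stand-in for the real ack proto:

import com.google.protobuf.Int64Value;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class DelimitedFramingSketch {
  public static void main(String[] args) throws IOException {
    // Stand-in message; the real ack carries a seqno, per-datanode replies, etc.
    Int64Value seqno = Int64Value.newBuilder().setValue(42L).build();

    // Write: varint length prefix + serialized message bytes.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    seqno.writeDelimitedTo(out);

    // Read it back; the length prefix tells the parser where to stop.
    Int64Value roundTrip = Int64Value.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray()));
    System.out.println("seqno=" + roundTrip.getValue());
  }
}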

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
index c69986a..c21a6a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
@@ -43,19 +43,19 @@ public class ReplaceDatanodeOnFailure {
 
     private final Condition condition;
 
-    private Policy(Condition condition) {
+    Policy(Condition condition) {
       this.condition = condition;
     }
-    
+
     Condition getCondition() {
       return condition;
     }
   }
 
   /** Datanode replacement condition */
-  private static interface Condition {
+  private interface Condition {
     /** Return true unconditionally. */
-    static final Condition TRUE = new Condition() {
+    Condition TRUE = new Condition() {
       @Override
       public boolean satisfy(short replication, DatanodeInfo[] existings,
           int nExistings, boolean isAppend, boolean isHflushed) {
@@ -64,7 +64,7 @@ public class ReplaceDatanodeOnFailure {
     };
 
     /** Return false unconditionally. */
-    static final Condition FALSE = new Condition() {
+    Condition FALSE = new Condition() {
       @Override
       public boolean satisfy(short replication, DatanodeInfo[] existings,
           int nExistings, boolean isAppend, boolean isHflushed) {
@@ -80,31 +80,24 @@ public class ReplaceDatanodeOnFailure {
      *   (1) floor(r/2) >= n; or
      *   (2) r > n and the block is hflushed/appended.
      */
-    static final Condition DEFAULT = new Condition() {
+    Condition DEFAULT = new Condition() {
       @Override
       public boolean satisfy(final short replication,
           final DatanodeInfo[] existings, final int n, final boolean isAppend,
           final boolean isHflushed) {
-        if (replication < 3) {
-          return false;
-        } else {
-          if (n <= (replication/2)) {
-            return true;
-          } else {
-            return isAppend || isHflushed;
-          }
-        }
+        return replication >= 3 &&
+            (n <= (replication / 2) || isAppend || isHflushed);
       }
     };
 
     /** Is the condition satisfied? */
-    public boolean satisfy(short replication, DatanodeInfo[] existings,
-        int nExistings, boolean isAppend, boolean isHflushed);
+    boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings,
+                    boolean isAppend, boolean isHflushed);
   }
 
   private final Policy policy;
   private final boolean bestEffort;
-  
+
   public ReplaceDatanodeOnFailure(Policy policy, boolean bestEffort) {
     this.policy = policy;
     this.bestEffort = bestEffort;
@@ -124,7 +117,7 @@ public class ReplaceDatanodeOnFailure {
    * Best effort means that the client will try to replace the failed datanode
    * (provided that the policy is satisfied), however, it will continue the
    * write operation in case that the datanode replacement also fails.
-   * 
+   *
    * @return Suppose the datanode replacement fails.
    *     false: An exception should be thrown so that the write will fail.
    *     true : The write should be resumed with the remaining datandoes.
@@ -137,16 +130,13 @@ public class ReplaceDatanodeOnFailure {
   public boolean satisfy(
       final short replication, final DatanodeInfo[] existings,
       final boolean isAppend, final boolean isHflushed) {
-    final int n = existings == null? 0: existings.length;
-    if (n == 0 || n >= replication) {
-      //don't need to add datanode for any policy.
-      return false;
-    } else {
-      return policy.getCondition().satisfy(
-          replication, existings, n, isAppend, isHflushed);
-    }
+    final int n = existings == null ? 0 : existings.length;
+    //don't need to add datanode for any policy.
+    return !(n == 0 || n >= replication) &&
+        policy.getCondition().satisfy(replication, existings, n, isAppend,
+            isHflushed);
   }
-  
+
   @Override
   public String toString() {
     return policy.toString();
@@ -158,7 +148,7 @@ public class ReplaceDatanodeOnFailure {
     final boolean bestEffort = conf.getBoolean(
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY,
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_DEFAULT);
-    
+
     return new ReplaceDatanodeOnFailure(policy, bestEffort);
   }
 
@@ -197,4 +187,4 @@ public class ReplaceDatanodeOnFailure {
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY,
         bestEffort);
   }
-}
\ No newline at end of file
+}
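The refactored DEFAULT condition above is easiest to check against its javadoc with a few concrete inputs. Note that the outer satisfy() method already returns false when there are no existing datanodes or when n >= replication, so the condition only decides the remaining cases. A standalone sketch with a hypothetical class name; the boolean expression is copied from the hunk above:

public class DefaultConditionSketch {
  // r = requested replication, n = number of existing datanodes in the pipeline.
  static boolean satisfy(short replication, int n, boolean isAppend,
      boolean isHflushed) {
    return replication >= 3 &&
        (n <= (replication / 2) || isAppend || isHflushed);
  }

  public static void main(String[] args) {
    // r=3, one datanode left: floor(3/2) = 1 >= 1, so replace.
    System.out.println(satisfy((short) 3, 1, false, false));  // true
    // r=3, two datanodes left, plain write: no replacement.
    System.out.println(satisfy((short) 3, 2, false, false));  // false
    // r=3, two datanodes left, but the block was hflushed: replace.
    System.out.println(satisfy((short) 3, 2, false, true));   // true
    // r=2: DEFAULT never asks for a replacement.
    System.out.println(satisfy((short) 2, 1, false, true));   // false
  }
}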

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
index d2bc348..6545681 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
@@ -61,12 +61,11 @@ public class Sender implements DataTransferProtocol {
 
   /** Create a sender for DataTransferProtocol with a output stream. */
   public Sender(final DataOutputStream out) {
-    this.out = out;    
+    this.out = out;
   }
 
   /** Initialize a operation. */
-  private static void op(final DataOutput out, final Op op
-      ) throws IOException {
+  private static void op(final DataOutput out, final Op op) throws IOException {
     out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
     op.write(out);
   }
@@ -80,13 +79,14 @@ public class Sender implements DataTransferProtocol {
     out.flush();
   }
 
-  static private CachingStrategyProto getCachingStrategy(CachingStrategy cachingStrategy) {
+  static private CachingStrategyProto getCachingStrategy(
+      CachingStrategy cachingStrategy) {
     CachingStrategyProto.Builder builder = CachingStrategyProto.newBuilder();
     if (cachingStrategy.getReadahead() != null) {
-      builder.setReadahead(cachingStrategy.getReadahead().longValue());
+      builder.setReadahead(cachingStrategy.getReadahead());
     }
     if (cachingStrategy.getDropBehind() != null) {
-      builder.setDropBehind(cachingStrategy.getDropBehind().booleanValue());
+      builder.setDropBehind(cachingStrategy.getDropBehind());
     }
     return builder.build();
   }
@@ -101,24 +101,25 @@ public class Sender implements DataTransferProtocol {
       final CachingStrategy cachingStrategy) throws IOException {
 
     OpReadBlockProto proto = OpReadBlockProto.newBuilder()
-      .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
-      .setOffset(blockOffset)
-      .setLen(length)
-      .setSendChecksums(sendChecksum)
-      .setCachingStrategy(getCachingStrategy(cachingStrategy))
-      .build();
+        .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName,
+            blockToken))
+        .setOffset(blockOffset)
+        .setLen(length)
+        .setSendChecksums(sendChecksum)
+        .setCachingStrategy(getCachingStrategy(cachingStrategy))
+        .build();
 
     send(out, Op.READ_BLOCK, proto);
   }
-  
+
 
   @Override
   public void writeBlock(final ExtendedBlock blk,
-      final StorageType storageType, 
+      final StorageType storageType,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final DatanodeInfo[] targets,
-      final StorageType[] targetStorageTypes, 
+      final StorageType[] targetStorageTypes,
       final DatanodeInfo source,
       final BlockConstructionStage stage,
       final int pipelineSize,
@@ -132,26 +133,27 @@ public class Sender implements DataTransferProtocol {
       final boolean[] targetPinnings) throws IOException {
     ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
         blk, clientName, blockToken);
-    
+
     ChecksumProto checksumProto =
-      DataTransferProtoUtil.toProto(requestedChecksum);
+        DataTransferProtoUtil.toProto(requestedChecksum);
 
     OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
-      .setHeader(header)
-      .setStorageType(PBHelperClient.convertStorageType(storageType))
-      .addAllTargets(PBHelperClient.convert(targets, 1))
-      .addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes, 1))
-      .setStage(toProto(stage))
-      .setPipelineSize(pipelineSize)
-      .setMinBytesRcvd(minBytesRcvd)
-      .setMaxBytesRcvd(maxBytesRcvd)
-      .setLatestGenerationStamp(latestGenerationStamp)
-      .setRequestedChecksum(checksumProto)
-      .setCachingStrategy(getCachingStrategy(cachingStrategy))
-      .setAllowLazyPersist(allowLazyPersist)
-      .setPinning(pinning)
-      .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));
-    
+        .setHeader(header)
+        .setStorageType(PBHelperClient.convertStorageType(storageType))
+        .addAllTargets(PBHelperClient.convert(targets, 1))
+        .addAllTargetStorageTypes(
+            PBHelperClient.convertStorageTypes(targetStorageTypes, 1))
+        .setStage(toProto(stage))
+        .setPipelineSize(pipelineSize)
+        .setMinBytesRcvd(minBytesRcvd)
+        .setMaxBytesRcvd(maxBytesRcvd)
+        .setLatestGenerationStamp(latestGenerationStamp)
+        .setRequestedChecksum(checksumProto)
+        .setCachingStrategy(getCachingStrategy(cachingStrategy))
+        .setAllowLazyPersist(allowLazyPersist)
+        .setPinning(pinning)
+        .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));
+
     if (source != null) {
       proto.setSource(PBHelperClient.convertDatanodeInfo(source));
     }
@@ -165,13 +167,14 @@ public class Sender implements DataTransferProtocol {
       final String clientName,
       final DatanodeInfo[] targets,
       final StorageType[] targetStorageTypes) throws IOException {
-    
+
     OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
-      .setHeader(DataTransferProtoUtil.buildClientHeader(
-          blk, clientName, blockToken))
-      .addAllTargets(PBHelperClient.convert(targets))
-      .addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes))
-      .build();
+        .setHeader(DataTransferProtoUtil.buildClientHeader(
+            blk, clientName, blockToken))
+        .addAllTargets(PBHelperClient.convert(targets))
+        .addAllTargetStorageTypes(
+            PBHelperClient.convertStorageTypes(targetStorageTypes))
+        .build();
 
     send(out, Op.TRANSFER_BLOCK, proto);
   }
@@ -180,11 +183,11 @@ public class Sender implements DataTransferProtocol {
   public void requestShortCircuitFds(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
-        throws IOException {
+      throws IOException {
     OpRequestShortCircuitAccessProto.Builder builder =
         OpRequestShortCircuitAccessProto.newBuilder()
-          .setHeader(DataTransferProtoUtil.buildBaseHeader(
-            blk, blockToken)).setMaxVersion(maxVersion);
+            .setHeader(DataTransferProtoUtil.buildBaseHeader(
+                blk, blockToken)).setMaxVersion(maxVersion);
     if (slotId != null) {
       builder.setSlotId(PBHelperClient.convert(slotId));
     }
@@ -192,12 +195,12 @@ public class Sender implements DataTransferProtocol {
     OpRequestShortCircuitAccessProto proto = builder.build();
     send(out, Op.REQUEST_SHORT_CIRCUIT_FDS, proto);
   }
-  
+
   @Override
   public void releaseShortCircuitFds(SlotId slotId) throws IOException {
     ReleaseShortCircuitAccessRequestProto.Builder builder =
         ReleaseShortCircuitAccessRequestProto.newBuilder().
-        setSlotId(PBHelperClient.convert(slotId));
+            setSlotId(PBHelperClient.convert(slotId));
     SpanId spanId = Tracer.getCurrentSpanId();
     if (spanId.isValid()) {
       builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
@@ -212,7 +215,7 @@ public class Sender implements DataTransferProtocol {
   public void requestShortCircuitShm(String clientName) throws IOException {
     ShortCircuitShmRequestProto.Builder builder =
         ShortCircuitShmRequestProto.newBuilder().
-        setClientName(clientName);
+            setClientName(clientName);
     SpanId spanId = Tracer.getCurrentSpanId();
     if (spanId.isValid()) {
       builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
@@ -222,20 +225,20 @@ public class Sender implements DataTransferProtocol {
     ShortCircuitShmRequestProto proto = builder.build();
     send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
   }
-  
+
   @Override
   public void replaceBlock(final ExtendedBlock blk,
-      final StorageType storageType, 
+      final StorageType storageType,
       final Token<BlockTokenIdentifier> blockToken,
       final String delHint,
       final DatanodeInfo source) throws IOException {
     OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
-      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
-      .setStorageType(PBHelperClient.convertStorageType(storageType))
-      .setDelHint(delHint)
-      .setSource(PBHelperClient.convertDatanodeInfo(source))
-      .build();
-    
+        .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+        .setStorageType(PBHelperClient.convertStorageType(storageType))
+        .setDelHint(delHint)
+        .setSource(PBHelperClient.convertDatanodeInfo(source))
+        .build();
+
     send(out, Op.REPLACE_BLOCK, proto);
   }
 
@@ -243,9 +246,9 @@ public class Sender implements DataTransferProtocol {
   public void copyBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken) throws IOException {
     OpCopyBlockProto proto = OpCopyBlockProto.newBuilder()
-      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
-      .build();
-    
+        .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+        .build();
+
     send(out, Op.COPY_BLOCK, proto);
   }
 
@@ -253,9 +256,9 @@ public class Sender implements DataTransferProtocol {
   public void blockChecksum(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken) throws IOException {
     OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder()
-      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
-      .build();
-    
+        .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+        .build();
+
     send(out, Op.BLOCK_CHECKSUM, proto);
   }
 }
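Most of the Sender changes are indentation fixes to the proto builders, but the op() hunk shows how every request starts: a 16-bit DATA_TRANSFER_VERSION followed by an opcode written by Op.write(), after which the operation-specific protobuf is written and the stream is flushed. A minimal framing sketch; the version value, opcode and payload bytes here are hypothetical stand-ins, not the real constants:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OpFramingSketch {
  static final short DATA_TRANSFER_VERSION = 28;  // hypothetical value

  // Mirrors the shape of Sender.op(): version short, then the opcode.
  static void op(DataOutputStream out, byte opcode) throws IOException {
    out.writeShort(DATA_TRANSFER_VERSION);
    out.writeByte(opcode);
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);

    op(out, (byte) 81);              // hypothetical opcode
    byte[] payload = {1, 2, 3};      // stand-in for the serialized request proto
    out.write(payload);
    out.flush();                     // the stream is flushed after each request

    System.out.println("frame length = " + buf.size() + " bytes");  // 2 + 1 + 3
  }
}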

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7136e8c5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
index 3846f4a..71e5abd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.util.ReflectionUtils;
  * The default implementation is to return false indicating that
  * the channel is not trusted.
  * This class can be overridden to provide custom logic to determine
- * whether a channel is trusted or not. 
+ * whether a channel is trusted or not.
  * The custom class can be specified via configuration.
  *
  */
@@ -39,14 +39,13 @@ public class TrustedChannelResolver implements Configurable {
   /**
    * Returns an instance of TrustedChannelResolver.
    * Looks up the configuration to see if there is custom class specified.
-   * @param conf
    * @return TrustedChannelResolver
    */
   public static TrustedChannelResolver getInstance(Configuration conf) {
     Class<? extends TrustedChannelResolver> clazz =
-      conf.getClass(
-          HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
-          TrustedChannelResolver.class, TrustedChannelResolver.class);
+        conf.getClass(
+            HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
+            TrustedChannelResolver.class, TrustedChannelResolver.class);
     return ReflectionUtils.newInstance(clazz, conf);
   }
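getInstance() above is the standard Hadoop pluggability idiom: a configuration key names the implementation class, conf.getClass() resolves it with a fallback default, and ReflectionUtils.newInstance() constructs it (setting its Configuration if it is Configurable). A sketch of the same pattern with a hypothetical base class and key; only Configuration.getClass and ReflectionUtils.newInstance are real APIs here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class PluggableResolverSketch {
  /** Hypothetical base type standing in for TrustedChannelResolver. */
  public static class Resolver {
    public boolean isTrusted() {
      return false;  // default: the channel is not trusted
    }
  }

  static final String RESOLVER_CLASS_KEY = "example.resolver.class";  // hypothetical key

  public static Resolver getInstance(Configuration conf) {
    Class<? extends Resolver> clazz = conf.getClass(
        RESOLVER_CLASS_KEY, Resolver.class, Resolver.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // No override configured, so the default Resolver is instantiated.
    System.out.println(getInstance(conf).isTrusted());  // false
  }
}

A subclass registered under the key replaces the default without any change to calling code, which is why the stock implementation simply reports the channel as untrusted.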
 

