hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1213867 [1/2] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ s...
Date: Tue, 13 Dec 2011 19:02:43 GMT
Author: atm
Date: Tue Dec 13 19:02:37 2011
New Revision: 1213867

URL: http://svn.apache.org/viewvc?rev=1213867&view=rev
Log:
Merge trunk into HA branch.

Added:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
      - copied unchanged from r1213862, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
Modified:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1213862
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Dec 13 19:02:37 2011
@@ -32,6 +32,10 @@ Trunk (unreleased changes)
     HDFS-2647. Used protobuf based RPC for InterDatanodeProtocol, 
     ClientDatanodeProtocol, JournalProtocol, NamenodeProtocol. (suresh)
 
+    HDFS-2663. Handle protobuf optional parameters correctly. (suresh)
+
+    HDFS-2666. Fix TestBackupNode failure. (suresh)
+
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multiple 
@@ -88,8 +92,8 @@ Trunk (unreleased changes)
     (suresh)
 
     HADOOP-7862   Hdfs changes to work with HADOOP 7862: 
-                  Move the support for multiple protocols to lower layer so that Writable,
-                  PB and Avro can all use it (Sanjay)
+    Move the support for multiple protocols to lower layer so that Writable,
+    PB and Avro can all use it (Sanjay)
 
     HDFS-1580. Add interface for generic Write Ahead Logging mechanisms.
     (Ivan Kelly via jitendra)
@@ -100,16 +104,17 @@ Trunk (unreleased changes)
 
     HDFS-2651 ClientNameNodeProtocol Translators for Protocol Buffers (sanjay)
 
+    HDFS-2650. Replace @inheritDoc with @Override. (Hari Mankude via suresh).
+
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the
-               namenode state. (Tomasz Nykiel via hairong)
+    namenode state. (Tomasz Nykiel via hairong)
 
     HDFS-2495. Increase granularity of write operations in ReplicationMonitor
     thus reducing contention for write lock. (Tomasz Nykiel via hairong)
 
     HDFS-2476. More CPU efficient data structure for under-replicated,
-               over-replicated, and invalidated blocks.
-               (Tomasz Nykiel via todd)
+    over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
 
   BUG FIXES
     HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
@@ -132,7 +137,7 @@ Trunk (unreleased changes)
     info on the console with security enabled. (Arpit Gupta via suresh)
 
     HDFS-2349. Corruption detected during block transfers between DNs
-               should log a WARN instead of INFO. (harsh)
+    should log a WARN instead of INFO. (harsh)
 
     HDFS-2188. Make FSEditLog create its journals from a list of URIs rather 
     than NNStorage. (Ivan Kelly via jitendra)
@@ -143,14 +148,17 @@ Trunk (unreleased changes)
     HDFS-2497 Fix TestBackupNode failure. (suresh)
 
     HDFS-2526. (Client)NamenodeProtocolTranslatorR23 do not need to keep a
-               reference to rpcProxyWithoutRetry (atm)
+    reference to rpcProxyWithoutRetry (atm)
 
     HDFS-2532. TestDfsOverAvroRpc timing out in trunk (Uma Maheswara Rao G
-               via todd)
+    via todd)
 
     HDFS-2606. webhdfs client filesystem impl must set the content-type 
     header for create/append. (tucu)
 
+    HDFS-1765. Block Replication should respect under-replication
+    block priority. (Uma Maheswara Rao G via eli)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -199,6 +207,8 @@ Release 0.23.1 - UNRELEASED
     HDFS-2604. Add a log message to show if WebHDFS is enabled and a
     configuration section in the forrest doc.  (szetszwo)
 
+    HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -231,6 +241,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2590. Fix the missing links in the WebHDFS forrest doc.  (szetszwo)
 
     HDFS-2596. TestDirectoryScanner doesn't test parallel scans. (eli)
+
+    HDFS-2653. DFSClient should cache whether addrs are non-local when
+    short-circuiting is enabled. (eli)
     
 Release 0.23.0 - 2011-11-01 
 

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1213862
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/main/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Tue Dec 13 19:02:37 2011
@@ -21,6 +21,7 @@ import java.io.DataInputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.net.Socket;
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.HashMap;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -57,8 +59,8 @@ import org.apache.hadoop.util.DataChecks
  * if security is enabled.</li>
  * </ul>
  */
-class BlockReaderLocal extends RemoteBlockReader2 {
-  public static final Log LOG = LogFactory.getLog(DFSClient.class);
+class BlockReaderLocal implements BlockReader {
+  private static final Log LOG = LogFactory.getLog(DFSClient.class);
 
   //Stores the cache and proxy for a local datanode.
   private static class LocalDatanodeInfo {
@@ -117,13 +119,24 @@ class BlockReaderLocal extends RemoteBlo
   private static Map<Integer, LocalDatanodeInfo> localDatanodeInfoMap = new HashMap<Integer, LocalDatanodeInfo>();
 
   private final FileInputStream dataIn; // reader for the data file
-
   private FileInputStream checksumIn;   // reader for the checksum file
 
   private int offsetFromChunkBoundary;
   
-  ByteBuffer dataBuff = null;
-  ByteBuffer checksumBuff = null;
+  private byte[] skipBuf = null;
+  private ByteBuffer dataBuff = null;
+  private ByteBuffer checksumBuff = null;
+  private DataChecksum checksum;
+  private final boolean verifyChecksum;
+
+  private static DirectBufferPool bufferPool = new DirectBufferPool();
+
+  private int bytesPerChecksum;
+  private int checksumSize;
+
+  /** offset in block where reader wants to actually read */
+  private long startOffset;
+  private final String filename;
   
   /**
    * The only way this object can be instantiated.
@@ -256,9 +269,14 @@ class BlockReaderLocal extends RemoteBlo
       long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
       boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
       FileInputStream checksumIn) throws IOException {
-    super(hdfsfile, block.getBlockPoolId(), block.getBlockId(), dataIn
-        .getChannel(), checksum, verifyChecksum, startOffset, firstChunkOffset,
-        length, null);
+    this.filename = hdfsfile;
+    this.checksum = checksum;
+    this.verifyChecksum = verifyChecksum;
+    this.startOffset = Math.max(startOffset, 0);
+
+    bytesPerChecksum = this.checksum.getBytesPerChecksum();
+    checksumSize = this.checksum.getChecksumSize();
+
     this.dataIn = dataIn;
     this.checksumIn = checksumIn;
     this.offsetFromChunkBoundary = (int) (startOffset-firstChunkOffset);
@@ -322,10 +340,8 @@ class BlockReaderLocal extends RemoteBlo
         readIntoBuffer(checksumIn, checksumBuff);
         checksumBuff.flip();
         dataBuff.flip();
-        if (verifyChecksum) {
-          checksum.verifyChunkedSums(dataBuff, checksumBuff, filename,
-              this.startOffset);
-        }
+        checksum.verifyChunkedSums(dataBuff, checksumBuff, filename,
+            this.startOffset);
       } else {
         dataRead = dataBuff.remaining();
       }
@@ -356,9 +372,24 @@ class BlockReaderLocal extends RemoteBlo
     }
     if (!verifyChecksum) {
       return dataIn.skip(n);
-    } else {
-     return super.skip(n);
     }
+    // Skip by reading the data so we stay in sync with checksums.
+    // This could be implemented more efficiently in the future to
+    // skip to the beginning of the appropriate checksum chunk
+    // and then only read to the middle of that chunk.
+    if (skipBuf == null) {
+      skipBuf = new byte[bytesPerChecksum];
+    }
+    long nSkipped = 0;
+    while ( nSkipped < n ) {
+      int toSkip = (int)Math.min(n-nSkipped, skipBuf.length);
+      int ret = read(skipBuf, 0, toSkip);
+      if ( ret <= 0 ) {
+        return nSkipped;
+      }
+      nSkipped += ret;
+    }
+    return nSkipped;
   }
 
   @Override
@@ -375,6 +406,27 @@ class BlockReaderLocal extends RemoteBlo
       bufferPool.returnBuffer(checksumBuff);
       checksumBuff = null;
     }
-    super.close();
+    startOffset = -1;
+    checksum = null;
+  }
+
+  @Override
+  public int readAll(byte[] buf, int offset, int len) throws IOException {
+    return BlockReaderUtil.readAll(this, buf, offset, len);
+  }
+
+  @Override
+  public void readFully(byte[] buf, int off, int len) throws IOException {
+    BlockReaderUtil.readFully(this, buf, off, len);
+  }
+
+  @Override
+  public Socket takeSocket() {
+    return null;
+  }
+
+  @Override
+  public boolean hasSentStatusCode() {
+    return false;
   }
 }
\ No newline at end of file
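
A note on the skip() rewrite above: with checksum verification enabled the reader cannot simply seek, because verification consumes whole chunks, so it skips by reading the data through the normal path. Reduced to its essentials, the pattern is a bounded read loop; a minimal sketch, with ChunkedReader as a hypothetical stand-in for the block reader:

    import java.io.IOException;

    interface ChunkedReader {
      // Reads up to len bytes, verifying checksums as it goes.
      int read(byte[] buf, int off, int len) throws IOException;
    }

    class SkipByReading {
      // Skips n bytes by consuming them through read() so checksum state
      // stays aligned; returns how many bytes were actually skipped.
      static long skip(ChunkedReader reader, long n, int bytesPerChecksum)
          throws IOException {
        byte[] skipBuf = new byte[bytesPerChecksum]; // at most one chunk per read
        long nSkipped = 0;
        while (nSkipped < n) {
          int toSkip = (int) Math.min(n - nSkipped, skipBuf.length);
          int ret = reader.read(skipBuf, 0, toSkip);
          if (ret <= 0) {
            return nSkipped; // EOF or error: report the partial skip
          }
          nSkipped += ret;
        }
        return nSkipped;
      }
    }

As the in-code comment notes, a more efficient variant could seek to the start of the target checksum chunk and read only the remainder of that chunk.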

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java Tue Dec 13 19:02:37 2011
@@ -81,17 +81,13 @@ public class CorruptFileBlockIterator im
     }
   }
 
-  /**
-   * {@inheritDoc}
-   */
+  
   @Override
   public boolean hasNext() {
     return nextPath != null;
   }
 
-  /**
-   * {@inheritDoc}
-   */
+  
   @Override
   public Path next() throws IOException {
     if (!hasNext()) {
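
These deletions, and the matching ones in the files below, are HDFS-2650: an overriding method that carries no doc comment of its own inherits the superclass or interface javadoc automatically, so a comment containing nothing but {@inheritDoc} is redundant. A small illustration of the rule (hypothetical types):

    interface Source {
      /** Returns true while more paths remain. */
      boolean hasNext();
    }

    class Impl implements Source {
      @Override // javadoc is inherited even with no comment at all
      public boolean hasNext() { return false; }
    }

{@inheritDoc} still earns its keep when inherited text is embedded inside new documentation, which none of these methods did.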

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Tue Dec 13 19:02:37 2011
@@ -33,10 +33,8 @@ import java.net.URI;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import javax.net.SocketFactory;
 
@@ -575,12 +573,13 @@ public class DFSClient implements java.i
     }
   }
   
-  private static Set<String> localIpAddresses = Collections
-      .synchronizedSet(new HashSet<String>());
+  private static Map<String, Boolean> localAddrMap = Collections
+      .synchronizedMap(new HashMap<String, Boolean>());
   
   private static boolean isLocalAddress(InetSocketAddress targetAddr) {
     InetAddress addr = targetAddr.getAddress();
-    if (localIpAddresses.contains(addr.getHostAddress())) {
+    Boolean cached = localAddrMap.get(addr.getHostAddress());
+    if (cached != null && cached) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("Address " + targetAddr + " is local");
       }
@@ -601,9 +600,7 @@ public class DFSClient implements java.i
     if (LOG.isTraceEnabled()) {
       LOG.trace("Address " + targetAddr + " is local");
     }
-    if (local == true) {
-      localIpAddresses.add(addr.getHostAddress());
-    }
+    localAddrMap.put(addr.getHostAddress(), local);
     return local;
   }
   
@@ -1735,8 +1732,7 @@ public class DFSClient implements java.i
     }
   }
   
-  boolean shouldTryShortCircuitRead(InetSocketAddress targetAddr)
-      throws IOException {
+  boolean shouldTryShortCircuitRead(InetSocketAddress targetAddr) {
     if (shortCircuitLocalReads && isLocalAddress(targetAddr)) {
       return true;
     }
@@ -1759,7 +1755,7 @@ public class DFSClient implements java.i
     }
   }
 
-  /** {@inheritDoc} */
+  @Override
   public String toString() {
     return getClass().getSimpleName() + "[clientName=" + clientName
         + ", ugi=" + ugi + "]"; 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Tue Dec 13 19:02:37 2011
@@ -179,7 +179,7 @@ public class DistributedFileSystem exten
     workingDir = makeAbsolute(dir);
   }
 
-  /** {@inheritDoc} */
+  
   @Override
   public Path getHomeDirectory() {
     return makeQualified(new Path("/user/" + dfs.ugi.getShortUserName()));
@@ -306,7 +306,7 @@ public class DistributedFileSystem exten
     dfs.concat(getPathName(trg), srcs);
   }
 
-  /** {@inheritDoc} */
+  
   @SuppressWarnings("deprecation")
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
@@ -315,7 +315,6 @@ public class DistributedFileSystem exten
   }
 
   /** 
-   * {@inheritDoc}
    * This rename operation is guaranteed to be atomic.
    */
   @SuppressWarnings("deprecation")
@@ -331,7 +330,6 @@ public class DistributedFileSystem exten
     return dfs.delete(getPathName(f), recursive);
   }
   
-  /** {@inheritDoc} */
   @Override
   public ContentSummary getContentSummary(Path f) throws IOException {
     statistics.incrementReadOps(1);
@@ -512,7 +510,7 @@ public class DistributedFileSystem exten
     return dfs.primitiveMkdir(getPathName(f), absolutePermission);
   }
 
-  /** {@inheritDoc} */
+ 
   @Override
   public void close() throws IOException {
     try {
@@ -552,7 +550,6 @@ public class DistributedFileSystem exten
     }
   }
   
-  /** {@inheritDoc} */
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     statistics.incrementReadOps(1);
@@ -614,9 +611,6 @@ public class DistributedFileSystem exten
     return dfs.getCorruptBlocksCount();
   }
 
-  /**
-   * {@inheritDoc}
-   */
   @Override
   public RemoteIterator<Path> listCorruptFileBlocks(Path path)
     throws IOException {
@@ -694,7 +688,6 @@ public class DistributedFileSystem exten
     dfs.metaSave(pathname);
   }
 
-  /** {@inheritDoc} */
   @Override
   public FsServerDefaults getServerDefaults() throws IOException {
     return dfs.getServerDefaults();
@@ -765,14 +758,12 @@ public class DistributedFileSystem exten
     }
   }
 
-  /** {@inheritDoc} */
   @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(Path f) throws IOException {
     statistics.incrementReadOps(1);
     return dfs.getFileChecksum(getPathName(f));
   }
 
-  /** {@inheritDoc }*/
   @Override
   public void setPermission(Path p, FsPermission permission
       ) throws IOException {
@@ -780,7 +771,6 @@ public class DistributedFileSystem exten
     dfs.setPermission(getPathName(p), permission);
   }
 
-  /** {@inheritDoc }*/
   @Override
   public void setOwner(Path p, String username, String groupname
       ) throws IOException {
@@ -791,7 +781,6 @@ public class DistributedFileSystem exten
     dfs.setOwner(getPathName(p), username, groupname);
   }
 
-  /** {@inheritDoc }*/
   @Override
   public void setTimes(Path p, long mtime, long atime
       ) throws IOException {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java Tue Dec 13 19:02:37 2011
@@ -529,7 +529,7 @@ public class HftpFileSystem extends File
   private class ChecksumParser extends DefaultHandler {
     private FileChecksum filechecksum;
 
-    /** {@inheritDoc} */
+    @Override
     public void startElement(String ns, String localname, String qname,
                 Attributes attrs) throws SAXException {
       if (!MD5MD5CRC32FileChecksum.class.getName().equals(qname)) {
@@ -563,7 +563,7 @@ public class HftpFileSystem extends File
     }
   }
 
-  /** {@inheritDoc} */
+  @Override
   public FileChecksum getFileChecksum(Path f) throws IOException {
     final String s = makeQualified(f).toUri().getPath();
     return new ChecksumParser().getFileChecksum(s);
@@ -611,7 +611,7 @@ public class HftpFileSystem extends File
   private class ContentSummaryParser extends DefaultHandler {
     private ContentSummary contentsummary;
 
-    /** {@inheritDoc} */
+    @Override
     public void startElement(String ns, String localname, String qname,
                 Attributes attrs) throws SAXException {
       if (!ContentSummary.class.getName().equals(qname)) {
@@ -697,7 +697,7 @@ public class HftpFileSystem extends File
     }
   }
 
-  /** {@inheritDoc} */
+  @Override
   public ContentSummary getContentSummary(Path f) throws IOException {
     final String s = makeQualified(f).toUri().getPath();
     final ContentSummary cs = new ContentSummaryParser().getContentSummary(s);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java Tue Dec 13 19:02:37 2011
@@ -85,7 +85,7 @@ public class RemoteBlockReader2  impleme
   
   Socket dnSock; //for now just sending the status code (e.g. checksumOk) after the read.
   private ReadableByteChannel in;
-  protected DataChecksum checksum;
+  private DataChecksum checksum;
   
   private PacketHeader curHeader;
   private ByteBuffer curPacketBuf = null;
@@ -96,25 +96,24 @@ public class RemoteBlockReader2  impleme
   private long lastSeqNo = -1;
 
   /** offset in block where reader wants to actually read */
-  protected long startOffset;
-  protected final String filename;
+  private long startOffset;
+  private final String filename;
 
-  protected static DirectBufferPool bufferPool =
-    new DirectBufferPool();
+  private static DirectBufferPool bufferPool = new DirectBufferPool();
   private ByteBuffer headerBuf = ByteBuffer.allocate(
       PacketHeader.PKT_HEADER_LEN);
 
-  protected int bytesPerChecksum;
-  protected int checksumSize;
+  private int bytesPerChecksum;
+  private int checksumSize;
 
   /**
    * The total number of bytes we need to transfer from the DN.
    * This is the amount that the user has requested plus some padding
    * at the beginning so that the read can begin on a chunk boundary.
    */
-  protected long bytesNeededToFinish;
+  private long bytesNeededToFinish;
 
-  protected final boolean verifyChecksum;
+  private final boolean verifyChecksum;
 
   private boolean sentStatusCode = false;
   
@@ -389,29 +388,12 @@ public class RemoteBlockReader2  impleme
 
   @Override
   public int readAll(byte[] buf, int offset, int len) throws IOException {
-    int n = 0;
-    for (;;) {
-      int nread = read(buf, offset + n, len - n);
-      if (nread <= 0) 
-        return (n == 0) ? nread : n;
-      n += nread;
-      if (n >= len)
-        return n;
-    }
+    return BlockReaderUtil.readAll(this, buf, offset, len);
   }
 
   @Override
-  public void readFully(byte[] buf, int off, int len)
-      throws IOException {
-    int toRead = len;
-    while (toRead > 0) {
-      int ret = read(buf, off, toRead);
-      if (ret < 0) {
-        throw new IOException("Premature EOF from inputStream");
-      }
-      toRead -= ret;
-      off += ret;
-    }    
+  public void readFully(byte[] buf, int off, int len) throws IOException {
+    BlockReaderUtil.readFully(this, buf, off, len);
   }
   
   /**
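
Both BlockReaderLocal (earlier in this diff) and RemoteBlockReader2 now delegate readAll/readFully to the new BlockReaderUtil, which was copied unchanged from trunk and therefore has no hunk here. Reconstructed from the inline bodies deleted above, the shared helpers presumably look like the following sketch (BlockReader is reduced to the one method used):

    import java.io.IOException;

    interface BlockReader { // minimal stand-in for o.a.h.hdfs.BlockReader
      int read(byte[] buf, int off, int len) throws IOException;
    }

    class BlockReaderUtil {
      // Reads as many bytes as possible, up to len; returns the count read,
      // or the last read() result if nothing was read at all.
      static int readAll(BlockReader reader, byte[] buf, int offset, int len)
          throws IOException {
        int n = 0;
        for (;;) {
          int nread = reader.read(buf, offset + n, len - n);
          if (nread <= 0)
            return (n == 0) ? nread : n;
          n += nread;
          if (n >= len)
            return n;
        }
      }

      // Reads exactly len bytes or throws on premature EOF.
      static void readFully(BlockReader reader, byte[] buf, int off, int len)
          throws IOException {
        int toRead = len;
        while (toRead > 0) {
          int ret = reader.read(buf, off, toRead);
          if (ret < 0) {
            throw new IOException("Premature EOF from inputStream");
          }
          toRead -= ret;
          off += ret;
        }
      }
    }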

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java Tue Dec 13 19:02:37 2011
@@ -53,9 +53,7 @@ public class CorruptFileBlocks implement
     return cookie;
   }
 
-  /**
-   * {@inheritDoc}
-   */
+  
   @Override
   public void readFields(DataInput in) throws IOException {
     int fileCount = in.readInt();
@@ -66,9 +64,7 @@ public class CorruptFileBlocks implement
     cookie = Text.readString(in);
   }
 
-  /**
-   * {@inheritDoc}
-   */
+  
   @Override
   public void write(DataOutput out) throws IOException {
     out.writeInt(files.length);
@@ -78,9 +74,8 @@ public class CorruptFileBlocks implement
     Text.writeString(out, cookie);
   }
 
-  /**
-   * {@inheritDoc}
-   */
+ 
+  @Override
   public boolean equals(Object obj) {
     if (this == obj) {
       return true;
@@ -93,9 +88,8 @@ public class CorruptFileBlocks implement
       Arrays.equals(files, other.files);
   }
 
-  /**
-   * {@inheritDoc}
-   */
+  
+  @Override
   public int hashCode() {
     int result = cookie.hashCode();
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Tue Dec 13 19:02:37 2011
@@ -185,14 +185,14 @@ public class DatanodeID implements Writa
   /////////////////////////////////////////////////
   // Writable
   /////////////////////////////////////////////////
-  /** {@inheritDoc} */
+  @Override
   public void write(DataOutput out) throws IOException {
     DeprecatedUTF8.writeString(out, name);
     DeprecatedUTF8.writeString(out, storageID);
     out.writeShort(infoPort);
   }
 
-  /** {@inheritDoc} */
+  @Override
   public void readFields(DataInput in) throws IOException {
     name = DeprecatedUTF8.readString(in);
     storageID = DeprecatedUTF8.readString(in);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Tue Dec 13 19:02:37 2011
@@ -382,7 +382,7 @@ public class DatanodeInfo extends Datano
        });
   }
 
-  /** {@inheritDoc} */
+  @Override
   public void write(DataOutput out) throws IOException {
     super.write(out);
 
@@ -400,7 +400,7 @@ public class DatanodeInfo extends Datano
     WritableUtils.writeEnum(out, getAdminState());
   }
 
-  /** {@inheritDoc} */
+  @Override
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java Tue Dec 13 19:02:37 2011
@@ -159,7 +159,7 @@ public class LocatedBlock implements Wri
     return lb;
   }
 
-  /** {@inheritDoc} */
+  @Override
   public String toString() {
     return getClass().getSimpleName() + "{" + b
         + "; getBlockSize()=" + getBlockSize()

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java Tue Dec 13 19:02:37 2011
@@ -225,7 +225,6 @@ public class LocatedBlocks implements Wr
     }
   }
 
-  /** {@inheritDoc} */
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName());

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Tue Dec 13 19:02:37 2011
@@ -24,6 +24,9 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -52,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
@@ -124,6 +128,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
@@ -208,11 +213,16 @@ public class ClientNamenodeProtocolServe
       RpcController controller, GetBlockLocationsRequestProto req)
       throws ServiceException {
     try {
-      return GetBlockLocationsResponseProto
-          .newBuilder()
-          .setLocations(
-              PBHelper.convert(server.getBlockLocations(req.getSrc(),
-                  req.getOffset(), req.getLength()))).build();
+      LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
+          req.getLength());
+      Builder builder = GetBlockLocationsResponseProto
+          .newBuilder();
+      if (b != null) {
+        builder.setLocations(
+            PBHelper.convert(server.getBlockLocations(req.getSrc(),
+                req.getOffset(), req.getLength()))).build();
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -325,7 +335,7 @@ public class ClientNamenodeProtocolServe
       return AddBlockResponseProto.newBuilder().setBlock(
           PBHelper.convert(
           server.addBlock(req.getSrc(), req.getClientName(), 
-                PBHelper.convert(req.getPrevious()), 
+                req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null, 
                 PBHelper.convert(
                   (DatanodeInfoProto[]) req.getExcludeNodesList().toArray()))))
            .build();
@@ -594,10 +604,14 @@ public class ClientNamenodeProtocolServe
       RpcController controller, DistributedUpgradeProgressRequestProto req)
       throws ServiceException {
     try {
-      UpgradeStatusReportProto result = PBHelper.convert(server
-          .distributedUpgradeProgress(PBHelper.convert(req.getAction())));
-      return DistributedUpgradeProgressResponseProto.newBuilder()
-          .setReport(result).build();
+      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
+          .convert(req.getAction()));
+      DistributedUpgradeProgressResponseProto.Builder builder = 
+          DistributedUpgradeProgressResponseProto.newBuilder();
+      if (result != null) {
+        builder.setReport(PBHelper.convert(result));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -636,9 +650,13 @@ public class ClientNamenodeProtocolServe
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
     try {
-      HdfsFileStatusProto result = 
-          PBHelper.convert(server.getFileInfo(req.getSrc()));
-      return GetFileInfoResponseProto.newBuilder().setFs(result).build();
+      HdfsFileStatus res = server.getFileInfo(req.getSrc());
+      GetFileInfoResponseProto.Builder builder = 
+          GetFileInfoResponseProto.newBuilder();
+      if (res != null) {
+        builder.setFs(PBHelper.convert(res));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Tue Dec 13 19:02:37 2011
@@ -83,14 +83,17 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
@@ -205,7 +208,10 @@ public class ClientNamenodeProtocolTrans
         .setLength(length)
         .build();
     try {
-      return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations());
+      GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
+          req);
+      return resp.hasLocations() ? 
+        PBHelper.convert(resp.getLocations()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -329,12 +335,15 @@ public class ClientNamenodeProtocolTrans
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException {
-    AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src)
-        .setClientName(clientName).setPrevious(PBHelper.convert(previous))
-        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)))
-        .build();
+    AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder();
+    builder.setSrc(src)
+        .setClientName(clientName)
+        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
+    if (previous != null) {
+      builder.setPrevious(PBHelper.convert(previous));
+    }
     try {
-      return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock());
+      return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -615,8 +624,9 @@ public class ClientNamenodeProtocolTrans
         DistributedUpgradeProgressRequestProto.newBuilder().
         setAction(PBHelper.convert(action)).build();
     try {
-      return PBHelper.convert(
-          rpcProxy.distributedUpgradeProgress(null, req).getReport());
+      DistributedUpgradeProgressResponseProto res = rpcProxy
+          .distributedUpgradeProgress(null, req);
+      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -653,7 +663,8 @@ public class ClientNamenodeProtocolTrans
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
         .setSrc(src).build();
     try {
-      return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs());
+      GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
+      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
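
The hunks in the two translator files above are the heart of HDFS-2663, and they apply one pattern in both directions: a generated protobuf builder throws NullPointerException if a setter is passed null, so the writer sets an optional field only when the value is non-null, and the reader checks hasX() before getX(), mapping an absent field back to null. A self-contained analog, with a hand-rolled builder standing in for the generated classes:

    // Stand-in for a generated message with one "optional" field.
    class OptionalFieldDemo {
      static final class Response {
        private final String report; // null models "field unset"
        private Response(String report) { this.report = report; }
        boolean hasReport() { return report != null; }
        String getReport() { return report; }

        static final class Builder {
          private String report;
          Builder setReport(String r) {
            if (r == null) // protobuf builders behave the same way
              throw new NullPointerException("setter rejects null");
            this.report = r;
            return this;
          }
          Response build() { return new Response(report); }
        }
      }

      public static void main(String[] args) {
        String result = null; // e.g. no distributed upgrade in progress
        Response.Builder builder = new Response.Builder();
        if (result != null) { // writer side: set only when present
          builder.setReport(result);
        }
        Response resp = builder.build();
        // Reader side: probe presence, map "unset" back to null.
        String report = resp.hasReport() ? resp.getReport() : null;
        System.out.println("report = " + report);
      }
    }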

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Tue Dec 13 19:02:37 2011
@@ -206,7 +206,7 @@ public class DatanodeProtocolClientSideT
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return PBHelper.convert(resp.getCmd());
+    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
   }
 
   @Override
@@ -264,7 +264,7 @@ public class DatanodeProtocolClientSideT
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return PBHelper.convert(resp.getCmd());
+    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java Tue Dec 13 19:02:37 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -110,7 +111,9 @@ public class DatanodeProtocolServerSideT
     DatanodeCommand[] cmds = response.getCommands();
     if (cmds != null) {
       for (int i = 0; i < cmds.length; i++) {
-        builder.addCmds(i, PBHelper.convert(cmds[i]));
+        if (cmds[i] != null) {
+          builder.addCmds(PBHelper.convert(cmds[i]));
+        }
       }
     }
     return builder.build();
@@ -131,8 +134,12 @@ public class DatanodeProtocolServerSideT
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
-        .build();
+    BlockReportResponseProto.Builder builder = 
+        BlockReportResponseProto.newBuilder();
+    if (cmd != null) {
+      builder.setCmd(PBHelper.convert(cmd));
+    }
+    return builder.build();
   }
 
   @Override
@@ -182,14 +189,20 @@ public class DatanodeProtocolServerSideT
   @Override
   public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
       ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand cmd;
+    UpgradeCommand ret;
     try {
-      cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
+      UpgradeCommand cmd = request.hasCmd() ? PBHelper
+          .convert(request.getCmd()) : null;
+      ret = impl.processUpgradeCommand(cmd);
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return ProcessUpgradeResponseProto.newBuilder()
-        .setCmd(PBHelper.convert(cmd)).build();
+    ProcessUpgradeResponseProto.Builder builder = 
+        ProcessUpgradeResponseProto.newBuilder();
+    if (ret != null) {
+      builder.setCmd(PBHelper.convert(ret));
+    }
+    return builder.build();
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Tue Dec 13 19:02:37 2011
@@ -129,6 +129,10 @@ import com.google.protobuf.ByteString;
 
 /**
  * Utilities for converting protobuf classes to and from implementation classes.
+ * 
+ * Note that when converting from an internal type to protobuf type, the
+ * converter never return null for protobuf type. The check for internal type
+ * being null must be done before calling the convert() method.
  */
 public class PBHelper {
   private static final RegisterCommandProto REG_CMD_PROTO = 
@@ -341,16 +345,19 @@ public class PBHelper {
 
   public static CheckpointCommandProto convert(CheckpointCommand cmd) {
     return CheckpointCommandProto.newBuilder()
-        .setSignature(convert(cmd.getSignature())).build();
+        .setSignature(convert(cmd.getSignature()))
+        .setNeedToReturnImage(cmd.needToReturnImage()).build();
   }
 
   public static NamenodeCommandProto convert(NamenodeCommand cmd) {
     if (cmd instanceof CheckpointCommand) {
       return NamenodeCommandProto.newBuilder().setAction(cmd.getAction())
-          .setType(NamenodeCommandProto.Type.NamenodeCommand)
+          .setType(NamenodeCommandProto.Type.CheckPointCommand)
           .setCheckpointCmd(convert((CheckpointCommand) cmd)).build();
     }
-    return NamenodeCommandProto.newBuilder().setAction(cmd.getAction()).build();
+    return NamenodeCommandProto.newBuilder()
+        .setType(NamenodeCommandProto.Type.NamenodeCommand)
+        .setAction(cmd.getAction()).build();
   }
 
   public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
@@ -369,6 +376,7 @@ public class PBHelper {
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
+    if (cmd == null) return null;
     switch (cmd.getType()) {
     case CheckPointCommand:
       CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
@@ -425,7 +433,8 @@ public class PBHelper {
     if (di == null) return null;
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
-        di.getLocation(), di.getHostName(),
+        di.hasLocation() ? di.getLocation() : null , 
+        di.hasHostName() ? di.getHostName() : null,
         di.getCapacity(),  di.getDfsUsed(),  di.getRemaining(),
         di.getBlockPoolUsed()  ,  di.getLastUpdate() , di.getXceiverCount() ,
         PBHelper.convert(di.getAdminState())); 
@@ -433,10 +442,16 @@ public class PBHelper {
   
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
-    return DatanodeInfoProto.newBuilder().
+    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+    if (di.getHostName() != null) {
+      builder.setHostName(di.getHostName());
+    }
+    if (di.getNetworkLocation() != null) {
+      builder.setLocation(di.getNetworkLocation());
+    }
+        
+    return builder.
      setId(PBHelper.convert((DatanodeID) di)).
-     setLocation(di.getNetworkLocation()).
-     setHostName(di.getHostName()).
      setCapacity(di.getCapacity()).
      setDfsUsed(di.getDfsUsed()).
      setRemaining(di.getRemaining()).
@@ -776,9 +791,14 @@ public class PBHelper {
 
   public static ReceivedDeletedBlockInfoProto convert(
       ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
-    return ReceivedDeletedBlockInfoProto.newBuilder()
-        .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
-        .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
+    ReceivedDeletedBlockInfoProto.Builder builder = 
+        ReceivedDeletedBlockInfoProto.newBuilder();
+    
+    if (receivedDeletedBlockInfo.getDelHints() != null) {
+      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
+    }
+    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+        .build();
   }
 
   public static UpgradeCommandProto convert(UpgradeCommand comm) {
@@ -802,7 +822,7 @@ public class PBHelper {
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
-        proto.getDeleteHint());
+        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
   }
   
   public static NamespaceInfoProto convert(NamespaceInfo info) {
@@ -862,13 +882,10 @@ public class PBHelper {
   
   // LocatedBlocks
   public static LocatedBlocks convert(LocatedBlocksProto lb) {
-    if (lb == null) {
-      return null;
-    }
     return new LocatedBlocks(
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
-        PBHelper.convert(lb.getLastBlock()),
+        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
         lb.getIsLastBlockComplete());
   }
   
@@ -876,11 +893,15 @@ public class PBHelper {
     if (lb == null) {
       return null;
     }
-    return LocatedBlocksProto.newBuilder().
-      setFileLength(lb.getFileLength()).
-      setUnderConstruction(lb.isUnderConstruction()).
-      addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
-      setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+    LocatedBlocksProto.Builder builder = 
+        LocatedBlocksProto.newBuilder();
+    if (lb.getLastLocatedBlock() != null) {
+      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
+    }
+    return builder.setFileLength(lb.getFileLength())
+        .setUnderConstruction(lb.isUnderConstruction())
+        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
+        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
   }
   
   public static FsServerDefaults convert(FsServerDefaultsProto fs) {
@@ -981,11 +1002,16 @@ public class PBHelper {
       setPermission(PBHelper.convert(fs.getPermission())).
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
-      setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
-    LocatedBlocks locations = null;
+    
+    if (fs.getSymlink() != null) {
+      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
+    }
     if (fs instanceof HdfsLocatedFileStatus) {
-      builder.setLocations(PBHelper.convert(locations));
+      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
+      if (locations != null) {
+        builder.setLocations(PBHelper.convert(locations));
+      }
     }
     return builder.build();
   }

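To make the convention in the new PBHelper Javadoc concrete, here is an illustrative round-trip sketch, not part of the patch, distilled from the ReceivedDeletedBlockInfo hunks above. It assumes the protobuf-generated ReceivedDeletedBlockInfoProto class and the HDFS internal types from this patch are on the classpath: an optional field is set only when the internal value is non-null, and is read back only after a has*() check.

    // Illustrative sketch of the PBHelper null-handling convention; assumes
    // the generated ReceivedDeletedBlockInfoProto from this patch is available.
    final class NullConventionSketch {
      static ReceivedDeletedBlockInfoProto toProto(ReceivedDeletedBlockInfo info) {
        ReceivedDeletedBlockInfoProto.Builder b =
            ReceivedDeletedBlockInfoProto.newBuilder()
                .setBlock(PBHelper.convert(info.getBlock()));  // required field
        if (info.getDelHints() != null) {
          b.setDeleteHint(info.getDelHints());  // optional: never set from null
        }
        return b.build();
      }

      static ReceivedDeletedBlockInfo fromProto(ReceivedDeletedBlockInfoProto p) {
        // An absent optional field maps back to null on the internal side.
        return new ReceivedDeletedBlockInfo(
            PBHelper.convert(p.getBlock()),
            p.hasDeleteHint() ? p.getDeleteHint() : null);
      }
    }
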
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Tue Dec 13 19:02:37 2011
@@ -112,7 +112,6 @@ public class BlockInfoUnderConstruction 
       return (this == obj) || super.equals(obj);
     }
 
-    /** {@inheritDoc} */
     @Override
     public String toString() {
       final StringBuilder b = new StringBuilder(getClass().getSimpleName());
@@ -271,7 +270,6 @@ public class BlockInfoUnderConstruction 
     return (this == obj) || super.equals(obj);
   }
 
-  /** {@inheritDoc} */
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(super.toString());

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Dec 13 19:02:37 2011
@@ -168,9 +168,6 @@ public class BlockManager {
   /** variable to enable check for enough racks */
   final boolean shouldCheckForEnoughRacks;
 
-  /** Last block index used for replication work. */
-  private int replIndex = 0;
-
   /** for block replicas placement */
   private BlockPlacementPolicy blockplacement;
   
@@ -923,74 +920,16 @@ public class BlockManager {
    * @return number of blocks scheduled for replication during this iteration.
    */
   private int computeReplicationWork(int blocksToProcess) throws IOException {
-    // Choose the blocks to be replicated
-    List<List<Block>> blocksToReplicate =
-      chooseUnderReplicatedBlocks(blocksToProcess);
-
-    // replicate blocks
-    return computeReplicationWorkForBlocks(blocksToReplicate);
-  }
-
-  /**
-   * Get a list of block lists to be replicated The index of block lists
-   * represents the
-   *
-   * @param blocksToProcess
-   * @return Return a list of block lists to be replicated. The block list index
-   *         represents its replication priority.
-   */
-  private List<List<Block>> chooseUnderReplicatedBlocks(int blocksToProcess) {
-    // initialize data structure for the return value
-    List<List<Block>> blocksToReplicate = new ArrayList<List<Block>>(
-        UnderReplicatedBlocks.LEVEL);
-    for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) {
-      blocksToReplicate.add(new ArrayList<Block>());
-    }
+    List<List<Block>> blocksToReplicate = null;
     namesystem.writeLock();
     try {
-      synchronized (neededReplications) {
-        if (neededReplications.size() == 0) {
-          return blocksToReplicate;
-        }
-
-        // Go through all blocks that need replications.
-        UnderReplicatedBlocks.BlockIterator neededReplicationsIterator = 
-            neededReplications.iterator();
-        // skip to the first unprocessed block, which is at replIndex
-        for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
-          neededReplicationsIterator.next();
-        }
-        // # of blocks to process equals either twice the number of live
-        // data-nodes or the number of under-replicated blocks whichever is less
-        blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
-
-        for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
-          if (!neededReplicationsIterator.hasNext()) {
-            // start from the beginning
-            replIndex = 0;
-            blocksToProcess = Math.min(blocksToProcess, neededReplications
-                .size());
-            if (blkCnt >= blocksToProcess)
-              break;
-            neededReplicationsIterator = neededReplications.iterator();
-            assert neededReplicationsIterator.hasNext() : "neededReplications should not be empty.";
-          }
-
-          Block block = neededReplicationsIterator.next();
-          int priority = neededReplicationsIterator.getPriority();
-          if (priority < 0 || priority >= blocksToReplicate.size()) {
-            LOG.warn("Unexpected replication priority: "
-                + priority + " " + block);
-          } else {
-            blocksToReplicate.get(priority).add(block);
-          }
-        } // end for
-      } // end synchronized neededReplication
+      // Choose the blocks to be replicated
+      blocksToReplicate = neededReplications
+          .chooseUnderReplicatedBlocks(blocksToProcess);
     } finally {
       namesystem.writeUnlock();
     }
-
-    return blocksToReplicate;
+    return computeReplicationWorkForBlocks(blocksToReplicate);
   }
 
   /** Replicate a set of blocks
@@ -1019,7 +958,7 @@ public class BlockManager {
             // abandoned block or block reopened for append
             if(fileINode == null || fileINode.isUnderConstruction()) {
               neededReplications.remove(block, priority); // remove from neededReplications
-              replIndex--;
+              neededReplications.decrementReplicationIndex(priority);
               continue;
             }
 
@@ -1043,7 +982,7 @@ public class BlockManager {
               if ( (pendingReplications.getNumReplicas(block) > 0) ||
                    (blockHasEnoughRacks(block)) ) {
                 neededReplications.remove(block, priority); // remove from neededReplications
-                replIndex--;
+                neededReplications.decrementReplicationIndex(priority);
                 NameNode.stateChangeLog.info("BLOCK* "
                     + "Removing block " + block
                     + " from neededReplications as it has enough replicas.");
@@ -1104,7 +1043,7 @@ public class BlockManager {
           if(fileINode == null || fileINode.isUnderConstruction()) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
-            replIndex--;
+            neededReplications.decrementReplicationIndex(priority);
             continue;
           }
           requiredReplication = fileINode.getReplication();
@@ -1118,7 +1057,7 @@ public class BlockManager {
             if ( (pendingReplications.getNumReplicas(block) > 0) ||
                  (blockHasEnoughRacks(block)) ) {
               neededReplications.remove(block, priority); // remove from neededReplications
-              replIndex--;
+              neededReplications.decrementReplicationIndex(priority);
               rw.targets = null;
               NameNode.stateChangeLog.info("BLOCK* "
                   + "Removing block " + block
@@ -1156,7 +1095,7 @@ public class BlockManager {
           // remove from neededReplications
           if(numEffectiveReplicas + targets.length >= requiredReplication) {
             neededReplications.remove(block, priority); // remove from neededReplications
-            replIndex--;
+            neededReplications.decrementReplicationIndex(priority);
           }
         }
       }

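The pairing of neededReplications.remove(block, priority) with decrementReplicationIndex(priority), repeated in each hunk above, keeps the per-priority cursor aligned with the shrinking queue. The toy program below, plain java.util collections rather than HDFS code, shows why the decrement is needed: removing an element behind the cursor shifts the remaining elements left, so without stepping the cursor back the next scan would skip a block.

    import java.util.ArrayList;
    import java.util.List;

    // Toy model of the remove-then-decrement invariant from BlockManager.
    public class CursorInvariantDemo {
      public static void main(String[] args) {
        List<String> queue = new ArrayList<String>();
        queue.add("blk_1"); queue.add("blk_2"); queue.add("blk_3");
        int cursor = 2;           // blk_1 and blk_2 were already scanned
        queue.remove("blk_1");    // blk_1 no longer needs replication
        cursor--;                 // the analogue of decrementReplicationIndex
        // Without the decrement, the cursor would point past blk_3 and skip it.
        System.out.println(queue.get(cursor));  // prints blk_3
      }
    }
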
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Dec 13 19:02:37 2011
@@ -63,7 +63,7 @@ public class BlockPlacementPolicyDefault
   BlockPlacementPolicyDefault() {
   }
     
-  /** {@inheritDoc} */
+  @Override
   public void initialize(Configuration conf,  FSClusterStats stats,
                          NetworkTopology clusterMap) {
     this.considerLoad = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
@@ -79,7 +79,7 @@ public class BlockPlacementPolicyDefault
     }
   };
 
-  /** {@inheritDoc} */
+  @Override
   public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
@@ -89,7 +89,6 @@ public class BlockPlacementPolicyDefault
         null, blocksize);
   }
 
-  /** {@inheritDoc} */
   @Override
   public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
@@ -525,7 +524,7 @@ public class BlockPlacementPolicyDefault
     return nodes;
   }
 
-  /** {@inheritDoc} */
+  @Override
   public int verifyBlockPlacement(String srcPath,
                                   LocatedBlock lBlk,
                                   int minRacks) {
@@ -544,7 +543,7 @@ public class BlockPlacementPolicyDefault
     return minRacks - racks.size();
   }
 
-  /** {@inheritDoc} */
+  @Override
   public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
                                                  Block block,
                                                  short replicationFactor,

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java Tue Dec 13 19:02:37 2011
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -81,10 +84,14 @@ class UnderReplicatedBlocks implements I
   private List<LightWeightLinkedSet<Block>> priorityQueues
       = new ArrayList<LightWeightLinkedSet<Block>>();
 
+  /** Stores the replication index for each priority */
+  private Map<Integer, Integer> priorityToReplIdx = new HashMap<Integer, Integer>(LEVEL);
+  
   /** Create an object. */
   UnderReplicatedBlocks() {
     for (int i = 0; i < LEVEL; i++) {
       priorityQueues.add(new LightWeightLinkedSet<Block>());
+      priorityToReplIdx.put(i, 0);
     }
   }
 
@@ -300,6 +307,70 @@ class UnderReplicatedBlocks implements I
       }
     }
   }
+  
+  /**
+   * Get a list of block lists to be replicated. The index of a block list
+   * represents its replication priority. The replication index is tracked for
+   * each priority list separately in the priorityToReplIdx map. Iterates through
+   * all priority lists and collects the elements after the replication index.
+   * Once the last priority list reaches its end, all replication indexes are
+   * reset to 0 and collection restarts from the first list to fulfill blocksToProcess.
+   * 
+   * @param blocksToProcess - number of blocks to fetch from under-replicated blocks.
+   * @return a list of block lists to be replicated. The block list index
+   *         represents its replication priority.
+   */
+  public synchronized List<List<Block>> chooseUnderReplicatedBlocks(
+      int blocksToProcess) {
+    // initialize data structure for the return value
+    List<List<Block>> blocksToReplicate = new ArrayList<List<Block>>(LEVEL);
+    for (int i = 0; i < LEVEL; i++) {
+      blocksToReplicate.add(new ArrayList<Block>());
+    }
+
+    if (size() == 0) { // There are no blocks to collect.
+      return blocksToReplicate;
+    }
+    
+    int blockCount = 0;
+    for (int priority = 0; priority < LEVEL; priority++) { 
+      // Go through all blocks that need replications with current priority.
+      BlockIterator neededReplicationsIterator = iterator(priority);
+      Integer replIndex = priorityToReplIdx.get(priority);
+      
+      // skip to the first unprocessed block, which is at replIndex
+      for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
+        neededReplicationsIterator.next();
+      }
+
+      blocksToProcess = Math.min(blocksToProcess, size());
+      
+      if (blockCount == blocksToProcess) {
+        break;  // stop once the expected number of blocks has been obtained
+      }
+      
+      // Loop through all remaining blocks in the list.
+      while (blockCount < blocksToProcess
+          && neededReplicationsIterator.hasNext()) {
+        Block block = neededReplicationsIterator.next();
+        blocksToReplicate.get(priority).add(block);
+        replIndex++;
+        blockCount++;
+      }
+      
+      if (!neededReplicationsIterator.hasNext()
+          && neededReplicationsIterator.getPriority() == LEVEL - 1) {
+        // reset every priority's replication index to 0 because there are
+        // no recently added blocks in any list.
+        for (int i = 0; i < LEVEL; i++) {
+          priorityToReplIdx.put(i, 0);
+        }
+        break;
+      }
+      priorityToReplIdx.put(priority, replIndex); 
+    }
+    return blocksToReplicate;
+  }
 
   /** returns an iterator of all blocks in a given priority queue */
   synchronized BlockIterator iterator(int level) {
@@ -380,4 +451,14 @@ class UnderReplicatedBlocks implements I
       return level;
     }
   }
+
+  /**
+   * Decrement the replication index for the given priority.
+   * 
+   * @param priority  - int priority level
+   */
+  public void decrementReplicationIndex(int priority) {
+    Integer replIdx = priorityToReplIdx.get(priority);
+    priorityToReplIdx.put(priority, --replIdx); 
+  }
 }

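Since the new chooseUnderReplicatedBlocks is the heart of this change, a stand-alone simulation may help readers who do not want to trace LightWeightLinkedSet: each priority level keeps its own cursor in a map, and a scan resumes where the previous one stopped. This is illustrative Java only, with simple lists standing in for the HDFS block queues; the reset of all cursors to 0 once the last priority list is exhausted is noted but omitted for brevity.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Simplified model of the per-priority replication cursors added above.
    public class PriorityCursorDemo {
      static final int LEVEL = 3;
      static final List<List<String>> queues = new ArrayList<List<String>>();
      static final Map<Integer, Integer> cursor = new HashMap<Integer, Integer>();

      static List<List<String>> choose(int blocksToProcess) {
        List<List<String>> chosen = new ArrayList<List<String>>();
        for (int i = 0; i < LEVEL; i++) {
          chosen.add(new ArrayList<String>());
        }
        int count = 0;
        for (int p = 0; p < LEVEL && count < blocksToProcess; p++) {
          int idx = cursor.get(p);
          List<String> q = queues.get(p);
          while (idx < q.size() && count < blocksToProcess) {
            chosen.get(p).add(q.get(idx++));  // resume after the last scan
            count++;
          }
          cursor.put(p, idx);
        }
        // The real code also resets every cursor to 0 once the last
        // priority list is exhausted; omitted here for brevity.
        return chosen;
      }

      public static void main(String[] args) {
        for (int i = 0; i < LEVEL; i++) {
          queues.add(new ArrayList<String>());
          cursor.put(i, 0);
        }
        queues.get(0).add("blk_1"); queues.get(0).add("blk_2");
        queues.get(1).add("blk_3");
        System.out.println(choose(2));  // [[blk_1, blk_2], [], []]
        System.out.println(choose(2));  // [[], [blk_3], []]
      }
    }
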
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Dec 13 19:02:37 2011
@@ -1788,7 +1788,7 @@ public class DataNode extends Configured
     return new ExtendedBlock(oldBlock.getBlockPoolId(), r);
   }
 
-  /** {@inheritDoc} */
+  @Override
   public long getProtocolVersion(String protocol, long clientVersion
       ) throws IOException {
     if (protocol.equals(InterDatanodeProtocol.class.getName())) {
@@ -1821,7 +1821,7 @@ public class DataNode extends Configured
       this.rInfo = rInfo;
     }
 
-    /** {@inheritDoc} */
+    @Override
     public String toString() {
       return "block:" + rInfo + " node:" + id;
     }
@@ -2006,7 +2006,6 @@ public class DataNode extends Configured
   }
 
   // ClientDataNodeProtocol implementation
-  /** {@inheritDoc} */
   @Override // ClientDataNodeProtocol
   public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
     checkWriteAccess(block);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Tue Dec 13 19:02:37 2011
@@ -193,7 +193,7 @@ public interface FSDatasetInterface exte
       this.checksumIn = checksumIn;
     }
 
-    /** {@inheritDoc} */
+    @Override
     public void close() {
       IOUtils.closeStream(dataIn);
       IOUtils.closeStream(checksumIn);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java Tue Dec 13 19:02:37 2011
@@ -40,7 +40,7 @@ public class ContentSummaryServlet exten
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
   
-  /** {@inheritDoc} */
+  @Override
   public void doGet(final HttpServletRequest request,
       final HttpServletResponse response) throws ServletException, IOException {
     final Configuration conf = 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java Tue Dec 13 19:02:37 2011
@@ -71,7 +71,6 @@ class EditLogFileOutputStream extends Ed
     fc.position(fc.size());
   }
 
-  /** {@inheritDoc} */
   @Override
   public void write(FSEditLogOp op) throws IOException {
     doubleBuf.writeOp(op);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Tue Dec 13 19:02:37 2011
@@ -80,7 +80,7 @@ public class FileChecksumServlets {
           dtParam + addrParam);
     }
 
-    /** {@inheritDoc} */
+    @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response
         ) throws ServletException, IOException {
       final ServletContext context = getServletContext();
@@ -104,7 +104,7 @@ public class FileChecksumServlets {
     /** For java.io.Serializable */
     private static final long serialVersionUID = 1L;
     
-    /** {@inheritDoc} */
+    @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response
         ) throws ServletException, IOException {
       final PrintWriter out = response.getWriter();

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Tue Dec 13 19:02:37 2011
@@ -261,13 +261,13 @@ public abstract class INode implements C
     this.name = name;
   }
 
-  /** {@inheritDoc} */
+  @Override
   public String getFullPathName() {
     // Get the full path name of this inode.
     return FSDirectory.getFullPathName(this);
   }
 
-  /** {@inheritDoc} */
+  @Override
   public String toString() {
     return "\"" + getFullPathName() + "\":"
     + getUserName() + ":" + getGroupName() + ":"

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Tue Dec 13 19:02:37 2011
@@ -372,7 +372,7 @@ class INodeDirectory extends INode {
     return parent;
   }
 
-  /** {@inheritDoc} */
+  @Override
   DirCounts spaceConsumedInTree(DirCounts counts) {
     counts.nsCount += 1;
     if (children != null) {
@@ -383,7 +383,7 @@ class INodeDirectory extends INode {
     return counts;    
   }
 
-  /** {@inheritDoc} */
+  @Override
   long[] computeContentSummary(long[] summary) {
     // Walk through the children of this node, using a new summary array
     // for the (sub)tree rooted at this node

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Tue Dec 13 19:02:37 2011
@@ -168,7 +168,7 @@ public class INodeFile extends INode {
     return 1;
   }
 
-  /** {@inheritDoc} */
+  @Override
   long[] computeContentSummary(long[] summary) {
     summary[0] += computeFileSize(true);
     summary[1]++;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Tue Dec 13 19:02:37 2011
@@ -252,13 +252,13 @@ public class LeaseManager {
       return paths.remove(src);
     }
 
-    /** {@inheritDoc} */
+    @Override
     public String toString() {
       return "[Lease.  Holder: " + holder
           + ", pendingcreates: " + paths.size() + "]";
     }
   
-    /** {@inheritDoc} */
+    @Override
     public int compareTo(Lease o) {
       Lease l1 = this;
       Lease l2 = o;
@@ -273,7 +273,7 @@ public class LeaseManager {
       }
     }
   
-    /** {@inheritDoc} */
+    @Override
     public boolean equals(Object o) {
       if (!(o instanceof Lease)) {
         return false;
@@ -286,7 +286,7 @@ public class LeaseManager {
       return false;
     }
   
-    /** {@inheritDoc} */
+    @Override
     public int hashCode() {
       return holder.hashCode();
     }
@@ -436,7 +436,7 @@ public class LeaseManager {
     }
   }
 
-  /** {@inheritDoc} */
+  @Override
   public synchronized String toString() {
     return getClass().getSimpleName() + "= {"
         + "\n leases=" + leases

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Dec 13 19:02:37 2011
@@ -647,7 +647,7 @@ public class NamenodeFsck {
       return (float) (totalReplicas) / (float) totalBlocks;
     }
     
-    /** {@inheritDoc} */
+    @Override
     public String toString() {
       StringBuilder res = new StringBuilder();
       res.append("Status: ").append((isHealthy() ? "HEALTHY" : "CORRUPT"))

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Dec 13 19:02:37 2011
@@ -129,7 +129,7 @@ public class SecondaryNameNode implement
   private FSNamesystem namesystem;
 
 
-  /** {@inheritDoc} */
+  @Override
   public String toString() {
     return getClass().getSimpleName() + " Status" 
       + "\nName Node Address    : " + nameNodeAddr   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java Tue Dec 13 19:02:37 2011
@@ -64,7 +64,7 @@ class SerialNumberManager {
       return i2t.get(i);
     }
 
-    /** {@inheritDoc} */
+    @Override
     public String toString() {
       return "max=" + max + ",\n  t2i=" + t2i + ",\n  i2t=" + i2t;
     }


