hadoop-hdfs-commits mailing list archives

From bor...@apache.org
Subject svn commit: r1076001 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apac...
Date Tue, 01 Mar 2011 19:54:06 GMT
Author: boryas
Date: Tue Mar  1 19:54:05 2011
New Revision: 1076001

URL: http://svn.apache.org/viewvc?rev=1076001&view=rev
Log:
HDFS-1663. HDFS federation: Rename getPoolId() everywhere to getBlockPoolId()

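The change is a pure accessor rename: ExtendedBlock.getPoolId() (and the matching getters on BlockCommand and FSNamesystem) becomes getBlockPoolId(), and every call site is updated mechanically. A minimal sketch of a call site before and after; the pool id and block values below are hypothetical, but the renamed accessor and the 4-argument constructor match the call sites in this diff:

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class GetBlockPoolIdExample {
      public static void main(String[] args) {
        // Hypothetical values; the constructor arguments follow the call
        // sites in this commit: (poolId, blockId, numBytes, generationStamp).
        ExtendedBlock block =
            new ExtendedBlock("BP-1-127.0.0.1-example", 1L, 0L, 1001L);

        // Before this commit: String bpid = block.getPoolId();
        // After HDFS-1663 the same underlying poolId field is read via
        // the renamed accessor:
        String bpid = block.getBlockPoolId();
        System.out.println("block pool id = " + bpid);
      }
    }
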
Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/BlockReader.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Tue Mar  1 19:54:05 2011
@@ -91,6 +91,9 @@ Trunk (unreleased changes)
     HDFS-1671. HDFS Federation: shutdown in DataNode should be able to 
     shutdown individual BP threads as well as the whole DN (boryas).
 
+    HDFS-1663. HDFS federation: Rename getPoolId() everywhere to 
+    getBlockPoolId() (tanping via boryas)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/BlockReader.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/BlockReader.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/BlockReader.java Tue Mar  1 19:54:05 2011
@@ -394,13 +394,13 @@ public class BlockReader extends FSInput
             "Got access token error for OP_READ_BLOCK, self="
                 + sock.getLocalSocketAddress() + ", remote="
                 + sock.getRemoteSocketAddress() + ", for file " + file
-                + ", for pool " + block.getPoolId() + " block " 
+                + ", for pool " + block.getBlockPoolId() + " block " 
                 + block.getBlockId() + "_" + block.getGenerationStamp());
       } else {
         throw new IOException("Got error for OP_READ_BLOCK, self="
             + sock.getLocalSocketAddress() + ", remote="
             + sock.getRemoteSocketAddress() + ", for file " + file
-            + ", for pool " + block.getPoolId() + " block " 
+            + ", for pool " + block.getBlockPoolId() + " block " 
             + block.getBlockId() + "_" + block.getGenerationStamp());
       }
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Mar  1 19:54:05 2011
@@ -797,7 +797,7 @@ class DFSOutputStream extends FSOutputSu
       if (success) {
         // update pipeline at the namenode
         ExtendedBlock newBlock = new ExtendedBlock(
-            block.getPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
+            block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
         dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock, nodes);
         // update client side generation stamp
         block = newBlock;

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java Tue Mar  1 19:54:05 2011
@@ -90,7 +90,7 @@ public class ExtendedBlock implements Wr
     block.readId(in);
   }
   
-  public String getPoolId() {
+  public String getBlockPoolId() {
     return poolId;
   }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Tue Mar  1 19:54:05 2011
@@ -115,7 +115,7 @@ class BlockReceiver implements java.io.C
         case PIPELINE_SETUP_APPEND:
           replicaInfo = datanode.data.append(block, newGs, minBytesRcvd);
           if (datanode.blockScanner != null) { // remove from block scanner
-            datanode.blockScanner.deleteBlock(block.getPoolId(),
+            datanode.blockScanner.deleteBlock(block.getBlockPoolId(),
                 block.getLocalBlock());
           }
           block.setGenerationStamp(newGs);
@@ -123,7 +123,7 @@ class BlockReceiver implements java.io.C
         case PIPELINE_SETUP_APPEND_RECOVERY:
           replicaInfo = datanode.data.recoverAppend(block, newGs, minBytesRcvd);
           if (datanode.blockScanner != null) { // remove from block scanner
-            datanode.blockScanner.deleteBlock(block.getPoolId(),
+            datanode.blockScanner.deleteBlock(block.getBlockPoolId(),
                 block.getLocalBlock());
           }
           block.setGenerationStamp(newGs);
@@ -228,7 +228,7 @@ class BlockReceiver implements java.io.C
    * affect this datanode unless it is caused by interruption.
    */
   private void handleMirrorOutError(IOException ioe) throws IOException {
-    String bpid = block.getPoolId();
+    String bpid = block.getBlockPoolId();
     LOG.info(datanode.getDNRegistrationForBP(bpid) + ":Exception writing block " +
              block + " to mirror " + mirrorAddr + "\n" +
              StringUtils.stringifyException(ioe));
@@ -248,7 +248,7 @@ class BlockReceiver implements java.io.C
   private void verifyChunks( byte[] dataBuf, int dataOff, int len, 
                              byte[] checksumBuf, int checksumOff ) 
                              throws IOException {
-    DatanodeProtocol nn = datanode.getBPNamenode(block.getPoolId());
+    DatanodeProtocol nn = datanode.getBPNamenode(block.getBlockPoolId());
     while (len > 0) {
       int chunkLen = Math.min(len, bytesPerChecksum);
       
@@ -907,7 +907,7 @@ class BlockReceiver implements java.io.C
                   receiver.clientName.length() > 0) {
                 long offset = 0;
                 DatanodeRegistration dnR = 
-                  datanode.getDNRegistrationForBP(block.getPoolId());
+                  datanode.getDNRegistrationForBP(block.getBlockPoolId());
                 ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                       receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                       "HDFS_WRITE", receiver.clientName, offset,

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Tue Mar  1 19:54:05 2011
@@ -95,7 +95,7 @@ class BlockSender implements java.io.Clo
     try {
       this.block = block;
       synchronized(datanode.data) { 
-        this.replica = datanode.data.getReplica(block.getPoolId(), 
+        this.replica = datanode.data.getReplica(block.getBlockPoolId(), 
             block.getBlockId());
         if (replica == null) {
           throw new ReplicaNotFoundException(block);
@@ -195,7 +195,7 @@ class BlockSender implements java.io.Clo
           || (length + startOffset) > endOffset) {
         String msg = " Offset " + startOffset + " and length " + length
         + " don't match block " + block + " ( blockLen " + endOffset + " )";
-        LOG.warn(datanode.getDNRegistrationForBP(block.getPoolId()) +
+        LOG.warn(datanode.getDNRegistrationForBP(block.getBlockPoolId()) +
             ":sendBlock() : " + msg);
         throw new IOException(msg);
       }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Tue Mar  1 19:54:05 2011
@@ -457,10 +457,10 @@ class DataBlockScanner implements Runnab
         updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
 
         // If the block does not exists anymore, then its not an error
-        if ( dataset.getFile(block.getPoolId(), block.getLocalBlock()) == null ) {
+        if ( dataset.getFile(block.getBlockPoolId(), block.getLocalBlock()) == null ) {
           LOG.info("Verification failed for " + block + ". Its ok since " +
           "it not in datanode dataset anymore.");
-          deleteBlock(block.getPoolId(), block.getLocalBlock());
+          deleteBlock(block.getBlockPoolId(), block.getLocalBlock());
           return;
         }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Mar  1 19:54:05 2011
@@ -541,19 +541,19 @@ public class DataNode extends Configured
   
   // calls specific to BP
   protected void notifyNamenodeReceivedBlock(ExtendedBlock block, String delHint) {
-    BPOfferService bpos = blockPoolManager.get(block.getPoolId());
+    BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
     if(bpos != null) {
       bpos.notifyNamenodeReceivedBlock(block, delHint); 
     } else {
       LOG.warn("Cannot find BPOfferService for reporting block received for bpid="
-          + block.getPoolId());
+          + block.getBlockPoolId());
     }
   }
   
   public void reportBadBlocks(ExtendedBlock block) throws IOException{
-    BPOfferService bpos = blockPoolManager.get(block.getPoolId());
+    BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
     if(bpos == null || bpos.bpNamenode == null) {
-      throw new IOException("cannot locate OfferService thread for bp="+block.getPoolId());
+      throw new IOException("cannot locate OfferService thread for bp="+block.getBlockPoolId());
     }
     bpos.reportBadBlocks(block);
   }
@@ -776,8 +776,8 @@ public class DataNode extends Configured
             block==null?"Block is null":"delHint is null");
       }
       
-      if (!block.getPoolId().equals(blockPoolId)) {
-        LOG.warn("BlockPool mismatch " + block.getPoolId() + 
+      if (!block.getBlockPoolId().equals(blockPoolId)) {
+        LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + 
             " vs. " + blockPoolId);
         return;
       }
@@ -1150,7 +1150,7 @@ public class DataNode extends Configured
       switch(cmd.getAction()) {
       case DatanodeProtocol.DNA_TRANSFER:
         // Send a copy of a block to another datanode
-        transferBlocks(bcmd.getPoolId(), bcmd.getBlocks(), bcmd.getTargets());
+        transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
         myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
         break;
       case DatanodeProtocol.DNA_INVALIDATE:
@@ -1161,10 +1161,10 @@ public class DataNode extends Configured
         Block toDelete[] = bcmd.getBlocks();
         try {
           if (blockScanner != null) {
-            blockScanner.deleteBlocks(bcmd.getPoolId(), toDelete);
+            blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete);
           }
           // using global fsdataset
-          data.invalidate(bcmd.getPoolId(), toDelete);
+          data.invalidate(bcmd.getBlockPoolId(), toDelete);
         } catch(IOException e) {
           checkDiskError();
           throw e;
@@ -1600,8 +1600,8 @@ public class DataNode extends Configured
   private void transferBlock( ExtendedBlock block, 
                               DatanodeInfo xferTargets[] 
                               ) throws IOException {
-    DatanodeProtocol nn = getBPNamenode(block.getPoolId());
-    DatanodeRegistration bpReg = getDNRegistrationForBP(block.getPoolId());
+    DatanodeProtocol nn = getBPNamenode(block.getBlockPoolId());
+    DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
     
     if (!data.isValidBlock(block)) {
       // block does not exist or is under-construction
@@ -1752,7 +1752,7 @@ public class DataNode extends Configured
       this.targets = targets;
       this.b = b;
       this.datanode = datanode;
-      BPOfferService bpos = blockPoolManager.get(b.getPoolId());
+      BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
       bpReg = bpos.bpRegistration;
     }
 
@@ -1823,12 +1823,12 @@ public class DataNode extends Configured
    */
   void closeBlock(ExtendedBlock block, String delHint) {
     myMetrics.blocksWritten.inc();
-    BPOfferService bpos = blockPoolManager.get(block.getPoolId());
+    BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
     if(bpos != null) {
       bpos.notifyNamenodeReceivedBlock(block, delHint);
     } else {
       LOG.warn("Cannot find BPOfferService for reporting block received for bpid="
-          + block.getPoolId());
+          + block.getBlockPoolId());
     }
     if (blockScanner != null) {
       blockScanner.addBlock(block);
@@ -2131,7 +2131,7 @@ public class DataNode extends Configured
                                           long newLength) throws IOException {
     ReplicaInfo r = data.updateReplicaUnderRecovery(oldBlock,
         recoveryId, newLength);
-    return new ExtendedBlock(oldBlock.getPoolId(), r);
+    return new ExtendedBlock(oldBlock.getBlockPoolId(), r);
   }
 
   /** {@inheritDoc} */
@@ -2169,7 +2169,7 @@ public class DataNode extends Configured
   /** Recover a block */
   private void recoverBlock(RecoveringBlock rBlock) throws IOException {
     ExtendedBlock block = rBlock.getBlock();
-    String blookPoolId = block.getPoolId();
+    String blookPoolId = block.getBlockPoolId();
     DatanodeInfo[] targets = rBlock.getLocations();
     DatanodeID[] datanodeids = (DatanodeID[])targets;
     List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
@@ -2229,7 +2229,7 @@ public class DataNode extends Configured
   void syncBlock(RecoveringBlock rBlock,
                          List<BlockRecord> syncList) throws IOException {
     ExtendedBlock block = rBlock.getBlock();
-    DatanodeProtocol nn = getBPNamenode(block.getPoolId());
+    DatanodeProtocol nn = getBPNamenode(block.getBlockPoolId());
     
     long recoveryId = rBlock.getNewGenerationStamp();
     if (LOG.isDebugEnabled()) {
@@ -2265,7 +2265,7 @@ public class DataNode extends Configured
     // Calculate list of nodes that will participate in the recovery
     // and the new block size
     List<BlockRecord> participatingList = new ArrayList<BlockRecord>();
-    final ExtendedBlock newBlock = new ExtendedBlock(block.getPoolId(), block
+    final ExtendedBlock newBlock = new ExtendedBlock(block.getBlockPoolId(), block
         .getBlockId(), -1, recoveryId);
     switch(bestState) {
     case FINALIZED:
@@ -2301,7 +2301,7 @@ public class DataNode extends Configured
     for(BlockRecord r : participatingList) {
       try {
         ExtendedBlock reply = r.datanode.updateReplicaUnderRecovery(
-            new ExtendedBlock(newBlock.getPoolId(), r.rInfo), recoveryId,
+            new ExtendedBlock(newBlock.getBlockPoolId(), r.rInfo), recoveryId,
             newBlock.getNumBytes());
         assert reply.equals(newBlock) &&
                reply.getNumBytes() == newBlock.getNumBytes() :

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Tue Mar  1 19:54:05 2011
@@ -159,7 +159,7 @@ class DataXceiver extends DataTransferPr
     // send the block
     BlockSender blockSender = null;
     DatanodeRegistration dnR = 
-      datanode.getDNRegistrationForBP(block.getPoolId());
+      datanode.getDNRegistrationForBP(block.getBlockPoolId());
     final String clientTraceFmt =
       clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
         ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
@@ -239,7 +239,7 @@ class DataXceiver extends DataTransferPr
     replyOut = new DataOutputStream(
                    NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
     DatanodeRegistration dnR = 
-      datanode.getDNRegistrationForBP(block.getPoolId());
+      datanode.getDNRegistrationForBP(block.getBlockPoolId());
     if (datanode.isBlockTokenEnabled) {
       try {
         datanode.blockTokenSecretManager.checkAccess(blockToken, null, block,

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Tue Mar  1 19:54:05 2011
@@ -414,7 +414,7 @@ public class DatanodeJspHelper {
       }
       for (int i = 0; i < blks.size(); i++) {
         if (blks.get(i).getBlock().getBlockId() == blockId) {
-          bpid = blks.get(i).getBlock().getPoolId();
+          bpid = blks.get(i).getBlock().getBlockPoolId();
           blockToken = blks.get(i).getBlockToken();
           break;
         }
@@ -649,7 +649,7 @@ public class DatanodeJspHelper {
       return;
     }
     LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
-    String poolId = lastBlk.getBlock().getPoolId();
+    String poolId = lastBlk.getBlock().getBlockPoolId();
     long blockSize = lastBlk.getBlock().getNumBytes();
     long blockId = lastBlk.getBlock().getBlockId();
     Token<BlockTokenIdentifier> accessToken = lastBlk.getBlockToken();

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Tue Mar  1 19:54:05 2011
@@ -1143,7 +1143,7 @@ public class FSDataset implements FSCons
    * Get File name for a given block.
    */
   public File getBlockFile(ExtendedBlock b) throws IOException {
-    return getBlockFile(b.getPoolId(), b.getLocalBlock());
+    return getBlockFile(b.getBlockPoolId(), b.getLocalBlock());
   }
   
   /**
@@ -1184,7 +1184,7 @@ public class FSDataset implements FSCons
    * @return the meta replica information
    */
   private ReplicaInfo getReplicaInfo(ExtendedBlock b) {
-    return volumeMap.get(b.getPoolId(), b.getLocalBlock());
+    return volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
   }
   
   /**
@@ -1337,7 +1337,7 @@ public class FSDataset implements FSCons
           " expected length is " + expectedBlockLen);
     }
 
-    return append(b.getPoolId(), (FinalizedReplica)replicaInfo, newGS,
+    return append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
         b.getNumBytes());
   }
   
@@ -1464,7 +1464,7 @@ public class FSDataset implements FSCons
 
     // change the replica's state/gs etc.
     if (replicaInfo.getState() == ReplicaState.FINALIZED ) {
-      return append(b.getPoolId(), (FinalizedReplica) replicaInfo, newGS, 
+      return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS, 
           b.getNumBytes());
     } else { //RBW
       bumpReplicaGS(replicaInfo, newGS);
@@ -1483,7 +1483,7 @@ public class FSDataset implements FSCons
     bumpReplicaGS(replicaInfo, newGS);
     // finalize the replica if RBW
     if (replicaInfo.getState() == ReplicaState.RBW) {
-      finalizeReplica(b.getPoolId(), replicaInfo);
+      finalizeReplica(b.getBlockPoolId(), replicaInfo);
     }
   }
   
@@ -1517,7 +1517,7 @@ public class FSDataset implements FSCons
   @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b)
       throws IOException {
-    ReplicaInfo replicaInfo = volumeMap.get(b.getPoolId(), b.getBlockId());
+    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
       throw new ReplicaAlreadyExistsException("Block " + b +
       " already exists in state " + replicaInfo.getState() +
@@ -1526,10 +1526,10 @@ public class FSDataset implements FSCons
     // create a new block
     FSVolume v = volumes.getNextVolume(b.getNumBytes());
     // create a rbw file to hold block in the designated volume
-    File f = v.createRbwFile(b.getPoolId(), b.getLocalBlock());
+    File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
     ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), 
         b.getGenerationStamp(), v, f.getParentFile());
-    volumeMap.add(b.getPoolId(), newReplicaInfo);
+    volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
     return newReplicaInfo;
   }
   
@@ -1539,7 +1539,7 @@ public class FSDataset implements FSCons
       throws IOException {
     DataNode.LOG.info("Recover the RBW replica " + b);
 
-    ReplicaInfo replicaInfo = volumeMap.get(b.getPoolId(), b.getBlockId());
+    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo == null) {
       throw new ReplicaNotFoundException(
           ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
@@ -1585,7 +1585,7 @@ public class FSDataset implements FSCons
   @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
       throws IOException {
-    ReplicaInfo replicaInfo = volumeMap.get(b.getPoolId(), b.getBlockId());
+    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
       throw new ReplicaAlreadyExistsException("Block " + b +
           " already exists in state " + replicaInfo.getState() +
@@ -1594,10 +1594,10 @@ public class FSDataset implements FSCons
     
     FSVolume v = volumes.getNextVolume(b.getNumBytes());
     // create a temporary file to hold block in the designated volume
-    File f = v.createTmpFile(b.getPoolId(), b.getLocalBlock());
+    File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
     ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
         b.getGenerationStamp(), v, f.getParentFile());
-    volumeMap.add(b.getPoolId(), newReplicaInfo);
+    volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
     
     return newReplicaInfo;
   }
@@ -1647,7 +1647,7 @@ public class FSDataset implements FSCons
       // been opened for append but never modified
       return;
     }
-    finalizeReplica(b.getPoolId(), replicaInfo);
+    finalizeReplica(b.getBlockPoolId(), replicaInfo);
   }
   
   private synchronized FinalizedReplica finalizeReplica(String bpid,
@@ -1681,7 +1681,7 @@ public class FSDataset implements FSCons
     ReplicaInfo replicaInfo = getReplicaInfo(b);
     if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
       // remove from volumeMap
-      volumeMap.remove(b.getPoolId(), b.getLocalBlock());
+      volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
       
       // delete the on-disk temp file
       if (delBlockFromDisk(replicaInfo.getBlockFile(), 
@@ -2211,7 +2211,7 @@ public class FSDataset implements FSCons
   @Override // FSDatasetInterface
   public synchronized ReplicaRecoveryInfo initReplicaRecovery(
       RecoveringBlock rBlock) throws IOException {
-    return initReplicaRecovery(rBlock.getBlock().getPoolId(),
+    return initReplicaRecovery(rBlock.getBlock().getBlockPoolId(),
         volumeMap, rBlock.getBlock().getLocalBlock(), rBlock.getNewGenerationStamp());
   }
 
@@ -2287,7 +2287,7 @@ public class FSDataset implements FSCons
                                     final long recoveryId,
                                     final long newlength) throws IOException {
     //get replica
-    final ReplicaInfo replica = volumeMap.get(oldBlock.getPoolId(), 
+    final ReplicaInfo replica = volumeMap.get(oldBlock.getBlockPoolId(), 
         oldBlock.getBlockId());
     DataNode.LOG.info("updateReplica: block=" + oldBlock
         + ", recoveryId=" + recoveryId
@@ -2317,7 +2317,7 @@ public class FSDataset implements FSCons
 
     //update replica
     final FinalizedReplica finalized = updateReplicaUnderRecovery(oldBlock
-        .getPoolId(), (ReplicaUnderRecovery) replica, recoveryId, newlength);
+        .getBlockPoolId(), (ReplicaUnderRecovery) replica, recoveryId, newlength);
 
     //check replica files after update
     checkReplicaFiles(finalized);
@@ -2358,7 +2358,7 @@ public class FSDataset implements FSCons
   @Override // FSDatasetInterface
   public synchronized long getReplicaVisibleLength(final ExtendedBlock block)
   throws IOException {
-    final Replica replica = volumeMap.get(block.getPoolId(), block.getBlockId());
+    final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
     if (replica == null) {
       throw new ReplicaNotFoundException(block);
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Mar  1 19:54:05 2011
@@ -3232,8 +3232,8 @@ public class FSNamesystem implements FSC
   }
 
   private void checkBlock(ExtendedBlock block) throws IOException {
-    if (block != null && !this.blockPoolId.equals(block.getPoolId())) {
-      throw new IOException("Unexpected BlockPoolId " + block.getPoolId()
+    if (block != null && !this.blockPoolId.equals(block.getBlockPoolId())) {
+      throw new IOException("Unexpected BlockPoolId " + block.getBlockPoolId()
           + " - expected " + blockPoolId);
     }
   }
@@ -5292,7 +5292,7 @@ public class FSNamesystem implements FSC
     return dir.fsImage.getBlockPoolID();
   }
   
-  public String getPoolId() {
+  public String getBlockPoolId() {
     return blockPoolId;
   }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Mar  1 19:54:05 2011
@@ -311,7 +311,7 @@ public class NamenodeFsck {
       if (isCorrupt) {
         corrupt++;
         res.corruptBlocks++;
-        out.print("\n" + path + ": CORRUPT blockpool " + block.getPoolId() + 
+        out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId() + 
             " block " + block.getBlockName()+"\n");
       }
       if (locs.length >= minReplication)
@@ -503,7 +503,7 @@ public class NamenodeFsck {
         s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
         s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
         
-        String file = BlockReader.getFileName(targetAddr, block.getPoolId(),
+        String file = BlockReader.getFileName(targetAddr, block.getBlockPoolId(),
             block.getBlockId());
         blockReader = BlockReader.newBlockReader(s, file, block, lblock
             .getBlockToken(), 0, -1, conf.getInt("io.file.buffer.size", 4096));

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Tue Mar  1 19:54:05 2011
@@ -76,7 +76,7 @@ public class BlockCommand extends Datano
     this.targets = EMPTY_TARGET;
   }
 
-  public String getPoolId() {
+  public String getBlockPoolId() {
     return poolId;
   }
   

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Mar  1 19:54:05 2011
@@ -1160,7 +1160,7 @@ public class MiniDFSCluster {
     if (!(dataSet instanceof SimulatedFSDataset)) {
       throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
     }
-    String bpid = getNamesystem().getPoolId();
+    String bpid = getNamesystem().getBlockPoolId();
     SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
     sdataset.injectBlocks(bpid, blocksToInject);
     dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
@@ -1261,7 +1261,7 @@ public class MiniDFSCluster {
    * @return file corresponding to the block
    */
   public static File getBlockFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getPoolId()), blk
+    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), blk
         .getBlockName());
   }
   

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Tue Mar  1 19:54:05 2011
@@ -64,7 +64,7 @@ public class TestClientProtocolForPipeli
       // test getNewStampAndToken on a non-existent block
       try {
         long newBlockId = firstBlock.getBlockId() + 1;
-        ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getPoolId(),
+        ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
             newBlockId, 0, firstBlock.getGenerationStamp());
         namenode.updateBlockForPipeline(newBlock, "");
         Assert.fail("Cannot get a new GS from a non-existent block");

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java Tue Mar  1 19:54:05 2011
@@ -84,7 +84,7 @@ public class TestCrcCorruption {
       // However, a client is alowed access to this block.
       //
       File storageDir = MiniDFSCluster.getStorageDir(0, 1);
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Tue Mar  1 19:54:05 2011
@@ -198,7 +198,7 @@ public class TestDFSRollback extends Tes
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
       startBlockPoolShouldFail(StartupOption.ROLLBACK, 
-          cluster.getNamesystem().getPoolId());
+          cluster.getNamesystem().getBlockPoolId());
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
@@ -220,7 +220,7 @@ public class TestDFSRollback extends Tes
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
                                                          Long.MAX_VALUE));
       startBlockPoolShouldFail(StartupOption.ROLLBACK, 
-          cluster.getNamesystem().getPoolId());
+          cluster.getNamesystem().getBlockPoolId());
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java Tue Mar  1 19:54:05 2011
@@ -1107,7 +1107,7 @@ public class TestDFSShell extends TestCa
   static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
     List<File> files = new ArrayList<File>();
     List<DataNode> datanodes = cluster.getDataNodes();
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
     for(int i = 0; i < blocks.length; i++) {
       FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Tue Mar  1 19:54:05 2011
@@ -241,7 +241,7 @@ public class TestDataTransferProtocol ex
 
       /* Test writing to a new block */
       long newBlockId = firstBlock.getBlockId() + 1;
-      ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getPoolId(),
+      ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
           newBlockId, 0, firstBlock.getGenerationStamp());
 
       // test PIPELINE_SETUP_CREATE on a new block
@@ -339,7 +339,7 @@ public class TestDataTransferProtocol ex
 
     // get the first blockid for the file
     final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
-    final String poolId = firstBlock.getPoolId();
+    final String poolId = firstBlock.getBlockPoolId();
     long newBlockId = firstBlock.getBlockId() + 1;
 
     recvBuf.reset();
@@ -420,7 +420,7 @@ public class TestDataTransferProtocol ex
     
     /* Test OP_READ_BLOCK */
 
-    String bpid = cluster.getNamesystem().getPoolId();
+    String bpid = cluster.getNamesystem().getBlockPoolId();
     ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.getLocalBlock());
     long blkid = blk.getBlockId();
     // bad block id

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Tue Mar  1 19:54:05 2011
@@ -135,7 +135,7 @@ public class TestFileAppend extends Test
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         ExtendedBlock b = blocks.get(i).getBlock();
-        File f = dataset.getFile(b.getPoolId(), b.getLocalBlock());
+        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
         File link = new File(f.toString() + ".link");
         System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Tue Mar  1 19:54:05 2011
@@ -268,7 +268,7 @@ public class TestFileAppend3 extends jun
       }
       for(DatanodeInfo datanodeinfo : lb.getLocations()) {
         final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
-        final Block metainfo = dn.data.getStoredBlock(blk.getPoolId(), 
+        final Block metainfo = dn.data.getStoredBlock(blk.getBlockPoolId(), 
             blk.getBlockId());
         assertEquals(size, metainfo.getNumBytes());
       }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Tue Mar  1 19:54:05 2011
@@ -64,7 +64,7 @@ public class TestFileCorruption extends 
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
       File storageDir = MiniDFSCluster.getStorageDir(2, 0);
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
@@ -124,7 +124,7 @@ public class TestFileCorruption extends 
       DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
       
       // get the block
-      final String bpid = cluster.getNamesystem().getPoolId();
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = MiniDFSCluster.getStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       ExtendedBlock blk = getBlock(bpid, dataDir);
@@ -142,7 +142,7 @@ public class TestFileCorruption extends 
       DataNode dataNode = datanodes.get(2);
       
       // report corrupted block by the third datanode
-      DatanodeRegistration dnR = dataNode.getDNRegistrationForBP(blk.getPoolId());
+      DatanodeRegistration dnR = dataNode.getDNRegistrationForBP(blk.getBlockPoolId());
       cluster.getNamesystem().markBlockAsCorrupt(blk, 
           new DatanodeInfo(dnR));
       

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Tue Mar  1 19:54:05 2011
@@ -764,8 +764,8 @@ public class TestFileCreation extends ju
         DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
         FSDataset dataset = (FSDataset)datanode.data;
         ExtendedBlock blk = locatedblock.getBlock();
-        Block b = dataset.getStoredBlock(blk.getPoolId(), blk.getBlockId());
-        File blockfile = dataset.findBlockFile(blk.getPoolId(), b.getBlockId());
+        Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
+        File blockfile = dataset.findBlockFile(blk.getBlockPoolId(), b.getBlockId());
         System.out.println("blockfile=" + blockfile);
         if (blockfile != null) {
           BufferedReader in = new BufferedReader(new FileReader(blockfile));

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Tue Mar  1 19:54:05 2011
@@ -140,7 +140,7 @@ public class TestInjectionForSimulatedSt
       //first time format
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                             cluster.getNameNodePort()),
                                             conf);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Tue Mar  1 19:54:05 2011
@@ -117,7 +117,7 @@ public class TestLeaseRecovery extends j
       long currentGS = lastblock.getGenerationStamp();
       for(int i = 0; i < REPLICATION_NUM; i++) {
         updatedmetainfo[i] = datanodes[i].data.getStoredBlock(lastblock
-            .getPoolId(), lastblock.getBlockId());
+            .getBlockPoolId(), lastblock.getBlockId());
         assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
         assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
         assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java Tue Mar  1 19:54:05 2011
@@ -103,7 +103,7 @@ public class TestPipelines {
     List<LocatedBlock> lb = cluster.getNameNode().getBlockLocations(
       filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-    String bpid = cluster.getNamesystem().getPoolId();
+    String bpid = cluster.getNamesystem().getBlockPoolId();
     Replica r = DataNodeAdapter.fetchReplicaInfo(cluster.getDataNodes().get(0),
         bpid, lb.get(0).getBlock().getBlockId());
     assertTrue("Replica shouldn'e be null", r != null);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Tue Mar  1 19:54:05 2011
@@ -101,7 +101,7 @@ public class TestBalancer extends TestCa
       ExtendedBlock[] blocks = new ExtendedBlock[numOfBlocks];
       for(int i=0; i<numOfBlocks; i++) {
         ExtendedBlock b = locatedBlocks.get(i).getBlock();
-        blocks[i] = new ExtendedBlock(b.getPoolId(), b.getBlockId(), b
+        blocks[i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(), b
             .getNumBytes(), b.getGenerationStamp());
       }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Mar  1 19:54:05 2011
@@ -363,7 +363,7 @@ public class SimulatedFSDataset  impleme
 
   @Override // FSDatasetInterface
   public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("Finalizing a non existing block " + b);
@@ -422,7 +422,7 @@ public class SimulatedFSDataset  impleme
 
   @Override // FSDatasetInterface
   public synchronized long getLength(ExtendedBlock b) throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("Finalizing a non existing block " + b);
@@ -481,7 +481,7 @@ public class SimulatedFSDataset  impleme
 
   @Override // FSDatasetInterface
   public synchronized boolean isValidBlock(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getPoolId());
+    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
     if (map == null) {
       return false;
     }
@@ -494,7 +494,7 @@ public class SimulatedFSDataset  impleme
 
   /* check if a block is created but not finalized */
   private synchronized boolean isBeingWritten(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getPoolId());
+    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
     if (map == null) {
       return false;
     }
@@ -512,7 +512,7 @@ public class SimulatedFSDataset  impleme
   @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null || !binfo.isFinalized()) {
       throw new ReplicaNotFoundException("Block " + b
@@ -525,7 +525,7 @@ public class SimulatedFSDataset  impleme
   @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface recoverAppend(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new ReplicaNotFoundException("Block " + b
@@ -543,7 +543,7 @@ public class SimulatedFSDataset  impleme
   @Override // FSDatasetInterface
   public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
       throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new ReplicaNotFoundException("Block " + b
@@ -560,7 +560,7 @@ public class SimulatedFSDataset  impleme
   @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
       long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
    if (binfo == null) {
       throw new ReplicaNotFoundException("Block " + b
@@ -593,7 +593,7 @@ public class SimulatedFSDataset  impleme
         throw new ReplicaAlreadyExistsException("Block " + b + 
             " is being written, and cannot be written to.");
     }
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = new BInfo(b.getLocalBlock(), true);
     map.put(binfo.theBlock, binfo);
     return binfo;
@@ -602,7 +602,7 @@ public class SimulatedFSDataset  impleme
   @Override // FSDatasetInterface
   public synchronized InputStream getBlockInputStream(ExtendedBlock b)
       throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("No such Block " + b );  
@@ -635,7 +635,7 @@ public class SimulatedFSDataset  impleme
    */
   private synchronized InputStream getMetaDataInStream(ExtendedBlock b)
                                               throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("No such Block " + b );  
@@ -650,7 +650,7 @@ public class SimulatedFSDataset  impleme
   @Override // FSDatasetInterface
   public synchronized long getMetaDataLength(ExtendedBlock b)
       throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("No such Block " + b );  
@@ -857,7 +857,7 @@ public class SimulatedFSDataset  impleme
   public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
   throws IOException {
     ExtendedBlock b = rBlock.getBlock();
-    final Map<Block, BInfo> map = getMap(b.getPoolId());
+    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("No such Block " + b );  

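Every SimulatedFSDataset hunk above performs the same two-step lookup: resolve the per-pool replica map by block pool id, then resolve the replica by its pool-local block. A self-contained sketch of that shape, with hypothetical stand-in types (the real class keys Map<Block, BInfo> maps by bpid, and BInfo is package-private):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class PoolScopedLookup {
      static class ReplicaStub { }  // stand-in for SimulatedFSDataset.BInfo

      // outer key: block pool id; inner key: pool-local block id
      private final Map<String, Map<Long, ReplicaStub>> blockMap =
          new HashMap<String, Map<Long, ReplicaStub>>();

      private Map<Long, ReplicaStub> getMap(String bpid) throws IOException {
        final Map<Long, ReplicaStub> map = blockMap.get(bpid);
        if (map == null) {
          throw new IOException("Non existent block pool " + bpid);
        }
        return map;
      }

      ReplicaStub find(String bpid, long blockId) throws IOException {
        // mirrors: getMap(b.getBlockPoolId()).get(b.getLocalBlock())
        return getMap(bpid).get(Long.valueOf(blockId));
      }
    }
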
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Tue Mar  1 19:54:05 2011
@@ -88,7 +88,7 @@ public class TestBlockReport {
    REPL_FACTOR = 1; // Reset in case a test has modified the value
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     fs = (DistributedFileSystem) cluster.getFileSystem();
-    bpid = cluster.getNamesystem().getPoolId();
+    bpid = cluster.getNamesystem().getBlockPoolId();
   }
 
   @After
@@ -136,7 +136,7 @@ public class TestBlockReport {
     }
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N0);
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     cluster.getNameNode().blockReport(dnR, poolId,
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -215,7 +215,7 @@ public class TestBlockReport {
 
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N0);
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     cluster.getNameNode().blockReport(dnR, poolId,
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -254,7 +254,7 @@ public class TestBlockReport {
     
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N0);
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     DatanodeCommand dnCmd = cluster.getNameNode().blockReport(dnR, poolId,
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -306,7 +306,7 @@ public class TestBlockReport {
 
    // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N1);
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     cluster.getNameNode().blockReport(dnR, poolId,
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -355,7 +355,7 @@ public class TestBlockReport {
     }
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N1);
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     cluster.getNameNode().blockReport(dnR, poolId,
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -427,7 +427,7 @@ public class TestBlockReport {
       
       // all blocks belong to the same file, hence same BP
       DataNode dn = cluster.getDataNodes().get(DN_N1);
-      String poolId = cluster.getNamesystem().getPoolId();
+      String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
       cluster.getNameNode().blockReport(dnR, poolId,
           new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -473,7 +473,7 @@ public class TestBlockReport {
                                                 
       // all blocks belong to the same file, hence same BP
       DataNode dn = cluster.getDataNodes().get(DN_N1);
-      String poolId = cluster.getNamesystem().getPoolId();
+      String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
       cluster.getNameNode().blockReport(dnR, poolId,
           new BlockListAsLongs(blocks, null).getBlockListAsLongs());
@@ -503,7 +503,7 @@ public class TestBlockReport {
       LOG.debug("Total number of DNs " + cluster.getDataNodes().size());
     }
    // Look on the specified DN for the replica of the block from the 1st DN
-    String bpid = cluster.getNamesystem().getPoolId();
+    String bpid = cluster.getNamesystem().getBlockPoolId();
     Replica r = ((FSDataset) cluster.getDataNodes().get(DN_N1).getFSDataset()).
       fetchReplicaInfo(bpid, bl.getBlockId());
     long start = System.currentTimeMillis();
@@ -710,7 +710,7 @@ public class TestBlockReport {
       // Get block from the first DN
       ret = cluster.getDataNodes().get(DN_N0).
         data.getStoredBlock(lb.getBlock()
-        .getPoolId(), lb.getBlock().getBlockId());
+        .getBlockPoolId(), lb.getBlock().getBlockId());
     return ret;
   }
 

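TestBlockReport repeats one sequence in every hunk: read the cluster's block pool id, look up the datanode's per-pool registration, then send the report. Factored out it would look roughly like this; a sketch that assumes a running MiniDFSCluster and only the signatures visible in the hunks (the helper class and method name are invented):

    import java.util.ArrayList;

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

    public class BlockReportHelper {
      static void reportBlocks(MiniDFSCluster cluster, int dnIndex,
          ArrayList<Block> blocks) throws Exception {
        DataNode dn = cluster.getDataNodes().get(dnIndex);
        // getBlockPoolId() is the renamed getPoolId() on the namesystem
        String poolId = cluster.getNamesystem().getBlockPoolId();
        DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
        cluster.getNameNode().blockReport(dnR, poolId,
            new BlockListAsLongs(blocks, null).getBlockListAsLongs());
      }
    }
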
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Tue Mar  1 19:54:05 2011
@@ -102,7 +102,7 @@ public class TestDataNodeVolumeFailure e
     // delete/make non-writable one of the directories (failed volume)
     data_fail = new File(dataDir, "data3");
     failedDir = MiniDFSCluster.getFinalizedDir(dataDir, 
-        cluster.getNamesystem().getPoolId());
+        cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
         //!FileUtil.fullyDelete(failedDir)
         !deteteBlocks(failedDir)
@@ -120,7 +120,7 @@ public class TestDataNodeVolumeFailure e
     
     // make sure a block report is sent 
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
-    String bpid = cluster.getNamesystem().getPoolId();
+    String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
     long[] bReport = dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs();
     cluster.getNameNode().blockReport(dnR, bpid, bReport);
@@ -304,7 +304,7 @@ public class TestDataNodeVolumeFailure e
 
   private int countRealBlocks(Map<String, BlockLocs> map) {
     int total = 0;
-    final String bpid = cluster.getNamesystem().getPoolId();
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
     for(int i=0; i<dn_num; i++) {
       for(int j=0; j<=1; j++) {
         File storageDir = MiniDFSCluster.getStorageDir(i, j);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java Tue Mar  1 19:54:05 2011
@@ -112,7 +112,7 @@ public class TestDatanodeRestart {
       dn = cluster.getDataNodes().get(0);
 
       // check volumeMap: one rwr replica
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       ReplicasMap replicas = ((FSDataset)(dn.data)).volumeMap;
       Assert.assertEquals(1, replicas.size(bpid));
       ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
@@ -147,7 +147,7 @@ public class TestDatanodeRestart {
         DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
         DFSTestUtil.waitReplication(fs, fileName, (short)1);
       }
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       DataNode dn = cluster.getDataNodes().get(0);
       Iterator<ReplicaInfo> replicasItor = 
         ((FSDataset)dn.data).volumeMap.replicas(bpid).iterator();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Tue Mar  1 19:54:05 2011
@@ -216,7 +216,7 @@ public class TestDirectoryScanner extend
     cluster = new MiniDFSCluster.Builder(CONF).build();
     try {
       cluster.waitActive();
-      bpid = cluster.getNamesystem().getPoolId();
+      bpid = cluster.getNamesystem().getBlockPoolId();
       fds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();
       CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
                   parallelism);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Tue Mar  1 19:54:05 2011
@@ -60,7 +60,7 @@ public class TestDiskError extends TestC
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     final int dnIndex = 0;
-    String bpid = cluster.getNamesystem().getPoolId();
+    String bpid = cluster.getNamesystem().getBlockPoolId();
     File storageDir = MiniDFSCluster.getStorageDir(dnIndex, 0);
     File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
     storageDir = MiniDFSCluster.getStorageDir(dnIndex, 1);
@@ -134,7 +134,7 @@ public class TestDiskError extends TestC
       out.close();
       
       // the temporary block & meta files should be deleted
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = MiniDFSCluster.getStorageDir(sndNode, 0);
       File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
       storageDir = MiniDFSCluster.getStorageDir(sndNode, 1);

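The volume-failure and disk-error tests resolve on-disk directories per block pool, which is why they also need the renamed accessor. A sketch of that resolution, using only the MiniDFSCluster static helpers that appear in the hunks (the wrapper class and method name are hypothetical):

    import java.io.File;

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class PoolDirs {
      // Under federation, the rbw directory of a volume is keyed by bpid.
      static File rbwDir(int dnIndex, int volume, String bpid) {
        File storageDir = MiniDFSCluster.getStorageDir(dnIndex, volume);
        return MiniDFSCluster.getRbwDir(storageDir, bpid);
      }
    }
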
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Tue Mar  1 19:54:05 2011
@@ -48,7 +48,7 @@ import org.junit.Test;
  */
 public class TestInterDatanodeProtocol {
   public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
-    Block metainfo = dn.data.getStoredBlock(b.getPoolId(), b.getBlockId());
+    Block metainfo = dn.data.getStoredBlock(b.getBlockPoolId(), b.getBlockId());
     Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
     Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
   }
@@ -109,7 +109,7 @@ public class TestInterDatanodeProtocol {
           new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
 
       //verify updateBlock
-      ExtendedBlock newblock = new ExtendedBlock(b.getPoolId(),
+      ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
@@ -224,7 +224,7 @@ public class TestInterDatanodeProtocol {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
 
       //create a file
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
@@ -262,7 +262,7 @@ public class TestInterDatanodeProtocol {
       //with (block length) != (stored replica's on disk length). 
       {
         //create a block with same id and gs but different length.
-        final ExtendedBlock tmp = new ExtendedBlock(b.getPoolId(), rri
+        final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
             .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
         try {
           //update should fail
@@ -275,7 +275,7 @@ public class TestInterDatanodeProtocol {
 
       //update
       final ReplicaInfo finalized = fsdataset.updateReplicaUnderRecovery(
-          new ExtendedBlock(b.getPoolId(), rri), recoveryid, newlength);
+          new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
 
       //check meta data after update
       FSDataset.checkReplicaFiles(finalized);

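TestInterDatanodeProtocol also exercises the two-argument ExtendedBlock(String, Block) constructor: the pool id comes from the original extended block, and the id, length, and generation stamp come from the pool-local replica info. A sketch of re-attaching a pool-local Block to its pool, as done for the updateReplicaUnderRecovery call above (the wrapper class and wrapLocal are hypothetical names):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class WrapLocal {
      // Re-attach a pool-local Block (e.g. a ReplicaRecoveryInfo) to its
      // block pool, as done before calling updateReplicaUnderRecovery.
      static ExtendedBlock wrapLocal(ExtendedBlock original, Block local) {
        return new ExtendedBlock(original.getBlockPoolId(), local);
      }
    }
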
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Tue Mar  1 19:54:05 2011
@@ -53,7 +53,7 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       setup(bpid, dataSet);
 
       // test close
@@ -73,7 +73,7 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       setup(bpid, dataSet);
 
       // test append
@@ -93,7 +93,7 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       setup(bpid, dataSet);
 
       // test writeToRbw
@@ -113,7 +113,7 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       setup(bpid, dataSet);
 
       // test writeToTemporary

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Mar  1 19:54:05 2011
@@ -874,7 +874,7 @@ public class NNThroughputBenchmark {
                           new DataStorage(nsInfo, dnInfo.getStorageID()));
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
           nameNode.blockReceived( receivedDNReg, 
-                                  nameNode.getNamesystem().getPoolId(),
+                                  nameNode.getNamesystem().getBlockPoolId(),
                                   new Block[] {blocks[i]},
                                   new String[] {DataNode.EMPTY_DEL_HINT});
         }
@@ -990,7 +990,7 @@ public class NNThroughputBenchmark {
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           nameNode.blockReceived(
               datanodes[dnIdx].dnRegistration, 
-              loc.getBlock().getPoolId(),
+              loc.getBlock().getBlockPoolId(),
               new Block[] {loc.getBlock().getLocalBlock()},
               new String[] {""});
         }
@@ -1010,7 +1010,7 @@ public class NNThroughputBenchmark {
       TinyDatanode dn = datanodes[daemonId];
       long start = System.currentTimeMillis();
       nameNode.blockReport(dn.dnRegistration, nameNode.getNamesystem()
-          .getPoolId(), dn.getBlockReportList());
+          .getBlockPoolId(), dn.getBlockReportList());
       long end = System.currentTimeMillis();
       return end-start;
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Tue Mar  1 19:54:05 2011
@@ -353,7 +353,7 @@ public class TestBlockTokenWithDFS exten
       tryRead(conf, lblock, true);
       // use a token with wrong blockID
       ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
-          .getPoolId(), lblock.getBlock().getBlockId() + 1);
+          .getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
       lblock.setBlockToken(cluster.getNameNode().getNamesystem()
           .blockTokenSecretManager.generateToken(wrongBlock,
               EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Tue Mar  1 19:54:05 2011
@@ -86,7 +86,7 @@ public class TestDeadDatanode {
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
 
-    String poolId = cluster.getNamesystem().getPoolId();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     // wait for datanode to be marked live
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeRegistration reg = 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Mar  1 19:54:05 2011
@@ -463,7 +463,7 @@ public class TestFsck extends TestCase {
       System.out.println("1. good fsck out: " + outStr);
       assertTrue(outStr.contains("has 0 CORRUPT files"));
       // delete the blocks
-      final String bpid = cluster.getNamesystem().getPoolId();
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i=0; i<4; i++) {
         for (int j=0; j<=1; j++) {
           File storageDir = MiniDFSCluster.getStorageDir(i, j);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Tue Mar  1 19:54:05 2011
@@ -138,7 +138,7 @@ public class TestListCorruptFileBlocks e
       int numCorrupt = corruptFileBlocks.size();
       assertTrue(numCorrupt == 0);
       // delete the blocks
-      String bpid = cluster.getNamesystem().getPoolId();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i = 0; i < 4; i++) {
         for (int j = 0; j <= 1; j++) {
           File storageDir = MiniDFSCluster.getStorageDir(i, j);
@@ -233,7 +233,7 @@ public class TestListCorruptFileBlocks e
           badFiles.size() == 0);
 
      // Now deliberately remove blocks from all files
-      final String bpid = cluster.getNamesystem().getPoolId();
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i=0; i<4; i++) {
         for (int j=0; j<=1; j++) {
           File storageDir = MiniDFSCluster.getStorageDir(i, j);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1076001&r1=1076000&r2=1076001&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Tue Mar  1 19:54:05 2011
@@ -340,7 +340,7 @@ public class TestBlockRecovery {
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
     DatanodeInfo[] locs = new DatanodeInfo[] {
-        new DatanodeInfo(dn.getDNRegistrationForBP(block.getPoolId())),
+        new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
         mock(DatanodeInfo.class) };
     RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
     blocks.add(rBlock);
@@ -409,7 +409,7 @@ public class TestBlockRecovery {
 
   private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
     List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
-    DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getPoolId());
+    DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
     BlockRecord blockRecord = new BlockRecord(
         new DatanodeID(dnR), spyDN,
         new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),


