hadoop-common-commits mailing list archives

From: wan...@apache.org
Subject: [09/50] [abbrv] hadoop git commit: HDFS-9869. Erasure Coding: Rename replication-based names in BlockManager to more generic [part-2]. Contributed by Rakesh R.
Date: Thu, 28 Apr 2016 18:01:53 GMT
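
The renames exercised below follow one pattern: replication-specific names in
BlockManager and DFSConfigKeys become generic reconstruction-based names as part
of the erasure coding work. A minimal sketch (not part of this commit) of the
renamed configuration key and counter that the updated tests rely on; the timeout
value, datanode count, and cluster setup here are illustrative assumptions:

    Configuration conf = new HdfsConfiguration();
    // Formerly DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY.
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 3);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // Formerly getPendingReplicationBlocksCount(); this is the counter the
    // tests in this commit assert on.
    long pending = bm.getPendingReconstructionBlocksCount();
    cluster.shutdown();
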
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
new file mode 100644
index 0000000..d07c657
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -0,0 +1,418 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * This class tests the internals of PendingReconstructionBlocks.java, as well
+ * as how PendingReconstructionBlocks acts in BlockManager
+ */
+public class TestPendingReconstruction {
+  final static int TIMEOUT = 3;     // 3 seconds
+  private static final int DFS_REPLICATION_INTERVAL = 1;
+  // Number of datanodes in the cluster
+  private static final int DATANODE_COUNT = 5;
+
+  private BlockInfo genBlockInfo(long id, long length, long gs) {
+    return new BlockInfoContiguous(new Block(id, length, gs),
+        (short) DATANODE_COUNT);
+  }
+
+  @Test
+  public void testPendingReconstruction() {
+    PendingReconstructionBlocks pendingReconstructions;
+    pendingReconstructions = new PendingReconstructionBlocks(TIMEOUT * 1000);
+    pendingReconstructions.start();
+    //
+    // Add 10 blocks to pendingReconstruction.
+    //
+    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(10);
+    for (int i = 0; i < storages.length; i++) {
+      BlockInfo block = genBlockInfo(i, i, 0);
+      DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
+      System.arraycopy(storages, 0, targets, 0, i);
+      pendingReconstructions.increment(block,
+          DatanodeStorageInfo.toDatanodeDescriptors(targets));
+    }
+    assertEquals("Size of pendingReconstruction ",
+                 10, pendingReconstructions.size());
+
+
+    //
+    // remove one item
+    //
+    BlockInfo blk = genBlockInfo(8, 8, 0);
+    pendingReconstructions.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
+    assertEquals("pendingReconstructions.getNumReplicas ",
+                 7, pendingReconstructions.getNumReplicas(blk));
+
+    //
+    // inserting the same item twice should only be counted once
+    //
+    pendingReconstructions.increment(blk, storages[0].getDatanodeDescriptor());
+    assertEquals("pendingReconstructions.getNumReplicas ",
+        7, pendingReconstructions.getNumReplicas(blk));
+
+    for (int i = 0; i < 7; i++) {
+      // removes all replicas
+      pendingReconstructions.decrement(blk, storages[i].getDatanodeDescriptor());
+    }
+    assertTrue(pendingReconstructions.size() == 9);
+    pendingReconstructions.increment(blk,
+        DatanodeStorageInfo.toDatanodeDescriptors(
+            DFSTestUtil.createDatanodeStorageInfos(8)));
+    assertTrue(pendingReconstructions.size() == 10);
+
+    //
+    // verify that the number of replicas returned
+    // is sane.
+    //
+    for (int i = 0; i < 10; i++) {
+      BlockInfo block = genBlockInfo(i, i, 0);
+      int numReplicas = pendingReconstructions.getNumReplicas(block);
+      assertTrue(numReplicas == i);
+    }
+
+    //
+    // verify that nothing has timed out so far
+    //
+    assertTrue(pendingReconstructions.getTimedOutBlocks() == null);
+
+    //
+    // Wait for one second and then insert some more items.
+    //
+    try {
+      Thread.sleep(1000);
+    } catch (Exception e) {
+    }
+
+    for (int i = 10; i < 15; i++) {
+      BlockInfo block = genBlockInfo(i, i, 0);
+      pendingReconstructions.increment(block,
+          DatanodeStorageInfo.toDatanodeDescriptors(
+              DFSTestUtil.createDatanodeStorageInfos(i)));
+    }
+    assertTrue(pendingReconstructions.size() == 15);
+
+    //
+    // Wait for everything to timeout.
+    //
+    int loop = 0;
+    while (pendingReconstructions.size() > 0) {
+      try {
+        Thread.sleep(1000);
+      } catch (Exception e) {
+      }
+      loop++;
+    }
+    System.out.println("Had to wait for " + loop +
+                       " seconds for the lot to timeout");
+
+    //
+    // Verify that everything has timed out.
+    //
+    assertEquals("Size of pendingReconstructions ", 0, pendingReconstructions.size());
+    Block[] timedOut = pendingReconstructions.getTimedOutBlocks();
+    assertTrue(timedOut != null && timedOut.length == 15);
+    for (int i = 0; i < timedOut.length; i++) {
+      assertTrue(timedOut[i].getBlockId() < 15);
+    }
+    pendingReconstructions.stop();
+  }
+
+/* Test that processPendingReconstructions will use the most recent
+ * blockinfo from the blocksmap by placing a larger genstamp into
+ * the blocksmap.
+ */
+  @Test
+  public void testProcessPendingReconstructions() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setLong(
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
+    MiniDFSCluster cluster = null;
+    Block block;
+    BlockInfo blockInfo;
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
+      cluster.waitActive();
+
+      FSNamesystem fsn = cluster.getNamesystem();
+      BlockManager blkManager = fsn.getBlockManager();
+
+      PendingReconstructionBlocks pendingReconstruction =
+          blkManager.pendingReconstruction;
+      LowRedundancyBlocks neededReconstruction = blkManager.neededReconstruction;
+      BlocksMap blocksMap = blkManager.blocksMap;
+
+      //
+      // Add 1 block to pendingReconstructions with GenerationStamp = 0.
+      //
+
+      block = new Block(1, 1, 0);
+      blockInfo = new BlockInfoContiguous(block, (short) 3);
+
+      pendingReconstruction.increment(blockInfo,
+          DatanodeStorageInfo.toDatanodeDescriptors(
+              DFSTestUtil.createDatanodeStorageInfos(1)));
+      BlockCollection bc = Mockito.mock(BlockCollection.class);
+      // Place into blocksmap with GenerationStamp = 1
+      blockInfo.setGenerationStamp(1);
+      blocksMap.addBlockCollection(blockInfo, bc);
+
+      assertEquals("Size of pendingReconstructions ", 1,
+          pendingReconstruction.size());
+
+      // Add a second block to pendingReconstructions that has no
+      // corresponding entry in blocksmap
+      block = new Block(2, 2, 0);
+      blockInfo = new BlockInfoContiguous(block, (short) 3);
+      pendingReconstruction.increment(blockInfo,
+          DatanodeStorageInfo.toDatanodeDescriptors(
+              DFSTestUtil.createDatanodeStorageInfos(1)));
+
+      // verify 2 blocks in pendingReconstructions
+      assertEquals("Size of pendingReconstructions ", 2,
+          pendingReconstruction.size());
+
+      //
+      // Wait for everything to timeout.
+      //
+      while (pendingReconstruction.size() > 0) {
+        try {
+          Thread.sleep(100);
+        } catch (Exception e) {
+        }
+      }
+
+      //
+      // Verify that block moves to neededReconstruction
+      //
+      while (neededReconstruction.size() == 0) {
+        try {
+          Thread.sleep(100);
+        } catch (Exception e) {
+        }
+      }
+
+      // Verify that the generation stamp we will try to replicate
+      // is now 1
+      for (Block b: neededReconstruction) {
+        assertEquals("Generation stamp is 1 ", 1,
+            b.getGenerationStamp());
+      }
+
+      // Verify size of neededReconstruction is exactly 1.
+      assertEquals("size of neededReconstruction is 1 ", 1,
+          neededReconstruction.size());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
+   * pending reconstruction records. Also make sure the blockReceivedAndDeleted
+   * call is idempotent with respect to the pending reconstruction records.
+   */
+  @Test
+  public void testBlockReceived() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+          DATANODE_COUNT).build();
+      cluster.waitActive();
+
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      FSNamesystem fsn = cluster.getNamesystem();
+      BlockManager blkManager = fsn.getBlockManager();
+
+      final String file = "/tmp.txt";
+      final Path filePath = new Path(file);
+      short replFactor = 1;
+      DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
+
+      // temporarily stop the heartbeat
+      ArrayList<DataNode> datanodes = cluster.getDataNodes();
+      for (int i = 0; i < DATANODE_COUNT; i++) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
+      }
+
+      hdfs.setReplication(filePath, (short) DATANODE_COUNT);
+      BlockManagerTestUtil.computeAllPendingWork(blkManager);
+
+      assertEquals(1, blkManager.pendingReconstruction.size());
+      INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
+      BlockInfo[] blocks = fileNode.getBlocks();
+      assertEquals(DATANODE_COUNT - 1,
+          blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
+
+      LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0)
+          .get(0);
+      DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
+      int reportDnNum = 0;
+      String poolId = cluster.getNamesystem().getBlockPoolId();
+      // let two datanodes (other than the one that already has the data)
+      // report to the NN
+      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
+        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
+          DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
+              poolId);
+          StorageReceivedDeletedBlocks[] report = {
+              new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
+              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
+                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
+          cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
+          reportDnNum++;
+        }
+      }
+      // IBRs are async, make sure the NN processes all of them.
+      cluster.getNamesystem().getBlockManager().flushBlockOps();
+      assertEquals(DATANODE_COUNT - 3,
+          blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
+
+      // let the same datanodes report again
+      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
+        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
+          DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
+              poolId);
+          StorageReceivedDeletedBlocks[] report =
+            { new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
+              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
+                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
+          cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
+          reportDnNum++;
+        }
+      }
+
+      cluster.getNamesystem().getBlockManager().flushBlockOps();
+      assertEquals(DATANODE_COUNT - 3,
+          blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
+
+      // re-enable heartbeats on all datanodes and trigger a heartbeat round
+      for (int i = 0; i < DATANODE_COUNT; i++) {
+        DataNodeTestUtils
+            .setHeartbeatsDisabledForTests(datanodes.get(i), false);
+        DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
+      }
+
+      Thread.sleep(5000);
+      assertEquals(0, blkManager.pendingReconstruction.size());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test if BlockManager can correctly remove corresponding pending records
+   * when a file is deleted
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testPendingAndInvalidate() throws Exception {
+    final Configuration CONF = new HdfsConfiguration();
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+    CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFS_REPLICATION_INTERVAL);
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+        DFS_REPLICATION_INTERVAL);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
+        DATANODE_COUNT).build();
+    cluster.waitActive();
+
+    FSNamesystem namesystem = cluster.getNamesystem();
+    BlockManager bm = namesystem.getBlockManager();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    try {
+      // 1. create a file
+      Path filePath = new Path("/tmp.txt");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
+
+      // 2. disable the heartbeats
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      }
+
+      // 3. mark a couple of blocks as corrupt
+      LocatedBlock block = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
+      cluster.getNamesystem().writeLock();
+      try {
+        bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
+            "STORAGE_ID", "TEST");
+        bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
+            "STORAGE_ID", "TEST");
+      } finally {
+        cluster.getNamesystem().writeUnlock();
+      }
+      BlockManagerTestUtil.computeAllPendingWork(bm);
+      BlockManagerTestUtil.updateState(bm);
+      assertEquals(bm.getPendingReconstructionBlocksCount(), 1L);
+      BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
+      assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
+
+      // 4. delete the file
+      fs.delete(filePath, true);
+      // retry at most 10 times, each time sleep for 1s. Note that 10s is much
+      // less than the default pending record timeout (5~10min)
+      int retries = 10;
+      long pendingNum = bm.getPendingReconstructionBlocksCount();
+      while (pendingNum != 0 && retries-- > 0) {
+        Thread.sleep(1000);  // let NN do the deletion
+        BlockManagerTestUtil.updateState(bm);
+        pendingNum = bm.getPendingReconstructionBlocksCount();
+      }
+      assertEquals(pendingNum, 0L);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
deleted file mode 100644
index f04387d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ /dev/null
@@ -1,418 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * This class tests the internals of PendingReplicationBlocks.java,
- * as well as how PendingReplicationBlocks acts in BlockManager
- */
-public class TestPendingReplication {
-  final static int TIMEOUT = 3;     // 3 seconds
-  private static final int DFS_REPLICATION_INTERVAL = 1;
-  // Number of datanodes in the cluster
-  private static final int DATANODE_COUNT = 5;
-
-  private BlockInfo genBlockInfo(long id, long length, long gs) {
-    return new BlockInfoContiguous(new Block(id, length, gs),
-        (short) DATANODE_COUNT);
-  }
-
-  @Test
-  public void testPendingReplication() {
-    PendingReplicationBlocks pendingReplications;
-    pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000);
-    pendingReplications.start();
-    //
-    // Add 10 blocks to pendingReplications.
-    //
-    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(10);
-    for (int i = 0; i < storages.length; i++) {
-      BlockInfo block = genBlockInfo(i, i, 0);
-      DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
-      System.arraycopy(storages, 0, targets, 0, i);
-      pendingReplications.increment(block,
-          DatanodeStorageInfo.toDatanodeDescriptors(targets));
-    }
-    assertEquals("Size of pendingReplications ",
-                 10, pendingReplications.size());
-
-
-    //
-    // remove one item
-    //
-    BlockInfo blk = genBlockInfo(8, 8, 0);
-    pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
-    assertEquals("pendingReplications.getNumReplicas ",
-                 7, pendingReplications.getNumReplicas(blk));
-
-    //
-    // insert the same item twice should be counted as once
-    //
-    pendingReplications.increment(blk, storages[0].getDatanodeDescriptor());
-    assertEquals("pendingReplications.getNumReplicas ",
-        7, pendingReplications.getNumReplicas(blk));
-
-    for (int i = 0; i < 7; i++) {
-      // removes all replicas
-      pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());
-    }
-    assertTrue(pendingReplications.size() == 9);
-    pendingReplications.increment(blk,
-        DatanodeStorageInfo.toDatanodeDescriptors(
-            DFSTestUtil.createDatanodeStorageInfos(8)));
-    assertTrue(pendingReplications.size() == 10);
-
-    //
-    // verify that the number of replicas returned
-    // are sane.
-    //
-    for (int i = 0; i < 10; i++) {
-      BlockInfo block = genBlockInfo(i, i, 0);
-      int numReplicas = pendingReplications.getNumReplicas(block);
-      assertTrue(numReplicas == i);
-    }
-
-    //
-    // verify that nothing has timed out so far
-    //
-    assertTrue(pendingReplications.getTimedOutBlocks() == null);
-
-    //
-    // Wait for one second and then insert some more items.
-    //
-    try {
-      Thread.sleep(1000);
-    } catch (Exception e) {
-    }
-
-    for (int i = 10; i < 15; i++) {
-      BlockInfo block = genBlockInfo(i, i, 0);
-      pendingReplications.increment(block,
-          DatanodeStorageInfo.toDatanodeDescriptors(
-              DFSTestUtil.createDatanodeStorageInfos(i)));
-    }
-    assertTrue(pendingReplications.size() == 15);
-
-    //
-    // Wait for everything to timeout.
-    //
-    int loop = 0;
-    while (pendingReplications.size() > 0) {
-      try {
-        Thread.sleep(1000);
-      } catch (Exception e) {
-      }
-      loop++;
-    }
-    System.out.println("Had to wait for " + loop +
-                       " seconds for the lot to timeout");
-
-    //
-    // Verify that everything has timed out.
-    //
-    assertEquals("Size of pendingReplications ", 0, pendingReplications.size());
-    Block[] timedOut = pendingReplications.getTimedOutBlocks();
-    assertTrue(timedOut != null && timedOut.length == 15);
-    for (int i = 0; i < timedOut.length; i++) {
-      assertTrue(timedOut[i].getBlockId() < 15);
-    }
-    pendingReplications.stop();
-  }
-
-/* Test that processPendingReplications will use the most recent
- * blockinfo from the blocksmap by placing a larger genstamp into
- * the blocksmap.
- */
-  @Test
-  public void testProcessPendingReplications() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
-    MiniDFSCluster cluster = null;
-    Block block;
-    BlockInfo blockInfo;
-    try {
-      cluster =
-          new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
-      cluster.waitActive();
-
-      FSNamesystem fsn = cluster.getNamesystem();
-      BlockManager blkManager = fsn.getBlockManager();
-
-      PendingReplicationBlocks pendingReplications =
-          blkManager.pendingReplications;
-      LowRedundancyBlocks neededReconstruction = blkManager.neededReconstruction;
-      BlocksMap blocksMap = blkManager.blocksMap;
-
-      //
-      // Add 1 block to pendingReplications with GenerationStamp = 0.
-      //
-
-      block = new Block(1, 1, 0);
-      blockInfo = new BlockInfoContiguous(block, (short) 3);
-
-      pendingReplications.increment(blockInfo,
-          DatanodeStorageInfo.toDatanodeDescriptors(
-              DFSTestUtil.createDatanodeStorageInfos(1)));
-      BlockCollection bc = Mockito.mock(BlockCollection.class);
-      // Place into blocksmap with GenerationStamp = 1
-      blockInfo.setGenerationStamp(1);
-      blocksMap.addBlockCollection(blockInfo, bc);
-
-      assertEquals("Size of pendingReplications ", 1,
-          pendingReplications.size());
-
-      // Add a second block to pendingReplications that has no
-      // corresponding entry in blocksmap
-      block = new Block(2, 2, 0);
-      blockInfo = new BlockInfoContiguous(block, (short) 3);
-      pendingReplications.increment(blockInfo,
-          DatanodeStorageInfo.toDatanodeDescriptors(
-              DFSTestUtil.createDatanodeStorageInfos(1)));
-
-      // verify 2 blocks in pendingReplications
-      assertEquals("Size of pendingReplications ", 2,
-          pendingReplications.size());
-
-      //
-      // Wait for everything to timeout.
-      //
-      while (pendingReplications.size() > 0) {
-        try {
-          Thread.sleep(100);
-        } catch (Exception e) {
-        }
-      }
-
-      //
-      // Verify that block moves to neededReconstruction
-      //
-      while (neededReconstruction.size() == 0) {
-        try {
-          Thread.sleep(100);
-        } catch (Exception e) {
-        }
-      }
-
-      // Verify that the generation stamp we will try to replicate
-      // is now 1
-      for (Block b: neededReconstruction) {
-        assertEquals("Generation stamp is 1 ", 1,
-            b.getGenerationStamp());
-      }
-
-      // Verify size of neededReconstruction is exactly 1.
-      assertEquals("size of neededReconstruction is 1 ", 1,
-          neededReconstruction.size());
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-  
-  /**
-   * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
-   * pending replications. Also make sure the blockReceivedAndDeleted call is
-   * idempotent to the pending replications. 
-   */
-  @Test
-  public void testBlockReceived() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
-          DATANODE_COUNT).build();
-      cluster.waitActive();
-
-      DistributedFileSystem hdfs = cluster.getFileSystem();
-      FSNamesystem fsn = cluster.getNamesystem();
-      BlockManager blkManager = fsn.getBlockManager();
-    
-      final String file = "/tmp.txt";
-      final Path filePath = new Path(file);
-      short replFactor = 1;
-      DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
-
-      // temporarily stop the heartbeat
-      ArrayList<DataNode> datanodes = cluster.getDataNodes();
-      for (int i = 0; i < DATANODE_COUNT; i++) {
-        DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
-      }
-
-      hdfs.setReplication(filePath, (short) DATANODE_COUNT);
-      BlockManagerTestUtil.computeAllPendingWork(blkManager);
-
-      assertEquals(1, blkManager.pendingReplications.size());
-      INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
-      BlockInfo[] blocks = fileNode.getBlocks();
-      assertEquals(DATANODE_COUNT - 1,
-          blkManager.pendingReplications.getNumReplicas(blocks[0]));
-
-      LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0)
-          .get(0);
-      DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
-      int reportDnNum = 0;
-      String poolId = cluster.getNamesystem().getBlockPoolId();
-      // let two datanodes (other than the one that already has the data) to
-      // report to NN
-      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
-        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
-          DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
-              poolId);
-          StorageReceivedDeletedBlocks[] report = { 
-              new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
-              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
-                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
-          cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
-          reportDnNum++;
-        }
-      }
-      // IBRs are async, make sure the NN processes all of them.
-      cluster.getNamesystem().getBlockManager().flushBlockOps();
-      assertEquals(DATANODE_COUNT - 3,
-          blkManager.pendingReplications.getNumReplicas(blocks[0]));
-
-      // let the same datanodes report again
-      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
-        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
-          DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
-              poolId);
-          StorageReceivedDeletedBlocks[] report = 
-            { new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
-              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
-                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
-          cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
-          reportDnNum++;
-        }
-      }
-
-      cluster.getNamesystem().getBlockManager().flushBlockOps();
-      assertEquals(DATANODE_COUNT - 3,
-          blkManager.pendingReplications.getNumReplicas(blocks[0]));
-
-      // re-enable heartbeat for the datanode that has data
-      for (int i = 0; i < DATANODE_COUNT; i++) {
-        DataNodeTestUtils
-            .setHeartbeatsDisabledForTests(datanodes.get(i), false);
-        DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
-      }
-
-      Thread.sleep(5000);
-      assertEquals(0, blkManager.pendingReplications.size());
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-  
-  /**
-   * Test if BlockManager can correctly remove corresponding pending records
-   * when a file is deleted
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testPendingAndInvalidate() throws Exception {
-    final Configuration CONF = new HdfsConfiguration();
-    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
-    CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-        DFS_REPLICATION_INTERVAL);
-    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
-        DFS_REPLICATION_INTERVAL);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
-        DATANODE_COUNT).build();
-    cluster.waitActive();
-    
-    FSNamesystem namesystem = cluster.getNamesystem();
-    BlockManager bm = namesystem.getBlockManager();
-    DistributedFileSystem fs = cluster.getFileSystem();
-    try {
-      // 1. create a file
-      Path filePath = new Path("/tmp.txt");
-      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
-      
-      // 2. disable the heartbeats
-      for (DataNode dn : cluster.getDataNodes()) {
-        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
-      }
-      
-      // 3. mark a couple of blocks as corrupt
-      LocatedBlock block = NameNodeAdapter.getBlockLocations(
-          cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
-      cluster.getNamesystem().writeLock();
-      try {
-        bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
-            "STORAGE_ID", "TEST");
-        bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
-            "STORAGE_ID", "TEST");
-      } finally {
-        cluster.getNamesystem().writeUnlock();
-      }
-      BlockManagerTestUtil.computeAllPendingWork(bm);
-      BlockManagerTestUtil.updateState(bm);
-      assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
-      BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
-      assertEquals(bm.pendingReplications.getNumReplicas(storedBlock), 2);
-
-      // 4. delete the file
-      fs.delete(filePath, true);
-      // retry at most 10 times, each time sleep for 1s. Note that 10s is much
-      // less than the default pending record timeout (5~10min)
-      int retries = 10; 
-      long pendingNum = bm.getPendingReplicationBlocksCount();
-      while (pendingNum != 0 && retries-- > 0) {
-        Thread.sleep(1000);  // let NN do the deletion
-        BlockManagerTestUtil.updateState(bm);
-        pendingNum = bm.getPendingReplicationBlocksCount();
-      }
-      assertEquals(pendingNum, 0L);
-    } finally {
-      cluster.shutdown();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 1e7312a..341933e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -96,8 +96,8 @@ public class TestDecommissioningStatus {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
         1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
-        4);
+    conf.setInt(
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
     conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index d723525..1032107 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -98,7 +98,7 @@ public class TestFileTruncate {
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
     conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index d35b8a7..cb19c2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -82,7 +82,7 @@ public class TestHostsFiles {
 
     // Have the NN check for pending replications every second so it
     // quickly schedules additional replicas as they are identified.
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
 
     // The DNs report blocks every second.
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 058ab8a..c51ca5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -156,7 +156,7 @@ public class TestMetaSave {
       line = reader.readLine();
       assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
       line = reader.readLine();
-      assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+      assertTrue(line.equals("Metasave: Blocks being reconstructed: 0"));
       line = reader.readLine();
       assertTrue(line.equals("Metasave: Blocks 2 waiting deletion from 1 datanodes."));
      //skip 2 lines to reach HDFS-9033 scenario.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
index bc7a0ef4..d17d800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
@@ -58,7 +58,7 @@ public class TestProcessCorruptBlocks {
   public void testWhenDecreasingReplication() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
     final FSNamesystem namesystem = cluster.getNamesystem();
@@ -113,7 +113,7 @@ public class TestProcessCorruptBlocks {
   public void testByAddingAnExtraDataNode() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
     FileSystem fs = cluster.getFileSystem();
     final FSNamesystem namesystem = cluster.getNamesystem();
@@ -164,7 +164,7 @@ public class TestProcessCorruptBlocks {
   public void testWithReplicationFactorAsOne() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
     final FSNamesystem namesystem = cluster.getNamesystem();
@@ -218,7 +218,7 @@ public class TestProcessCorruptBlocks {
   public void testWithAllCorruptReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
     final FSNamesystem namesystem = cluster.getNamesystem();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index 1c2dc91..6069924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -209,7 +209,7 @@ public class TestReconstructStripedBlocks {
           cellSize, NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
 
       assertEquals(0, getNumberOfBlocksToBeErasureCoded(cluster));
-      assertEquals(0, bm.getPendingReplicationBlocksCount());
+      assertEquals(0, bm.getPendingReconstructionBlocksCount());
 
       // missing 1 block, so 1 task should be scheduled
       DatanodeInfo dn0 = lbs[0].getLocations()[0];
@@ -217,7 +217,7 @@ public class TestReconstructStripedBlocks {
       cluster.setDataNodeDead(dn0);
       BlockManagerTestUtil.getComputedDatanodeWork(bm);
       assertEquals(1, getNumberOfBlocksToBeErasureCoded(cluster));
-      assertEquals(1, bm.getPendingReplicationBlocksCount());
+      assertEquals(1, bm.getPendingReconstructionBlocksCount());
 
       // missing another block, but no new task should be scheduled because
       // previous task isn't finished.
@@ -226,7 +226,7 @@ public class TestReconstructStripedBlocks {
       cluster.setDataNodeDead(dn1);
       BlockManagerTestUtil.getComputedDatanodeWork(bm);
       assertEquals(1, getNumberOfBlocksToBeErasureCoded(cluster));
-      assertEquals(1, bm.getPendingReplicationBlocksCount());
+      assertEquals(1, bm.getPendingReconstructionBlocksCount());
     } finally {
       cluster.shutdown();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 96822d6..3b3c35f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -296,7 +296,7 @@ public class TestDNFencing {
       LOG.info("Getting more replication work computed");
     }
     BlockManager bm1 = nn1.getNamesystem().getBlockManager();
-    while (bm1.getPendingReplicationBlocksCount() > 0) {
+    while (bm1.getPendingReconstructionBlocksCount() > 0) {
       BlockManagerTestUtil.updateState(bm1);
       cluster.triggerHeartbeats();
       Thread.sleep(1000);

