hadoop-common-commits mailing list archives

From aengin...@apache.org
Subject [27/46] hadoop git commit: HDFS-9857. Erasure Coding: Rename replication-based names in BlockManager to more generic [part-1]. Contributed by Rakesh R.
Date Fri, 18 Mar 2016 18:25:27 GMT
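For context, a minimal sketch (not part of the patch) of the renamed low-redundancy queue API as the updated tests below exercise it. The class and method names come straight from this diff; the block construction and argument ordering simply mirror the test calls further down.

    // LowRedundancyBlocks/BlockInfo* live in org.apache.hadoop.hdfs.server.blockmanagement,
    // Block in org.apache.hadoop.hdfs.protocol (see the imports in the removed test below).
    LowRedundancyBlocks lowRedundancyBlocks = new LowRedundancyBlocks();
    BlockInfo block = new BlockInfoContiguous(new Block(1L), (short) 3);

    // add(block, curReplicas, <left at 0 as the tests do>, decommissionedReplicas,
    // expectedReplicas): with 1 of 3 expected replicas the block lands in
    // QUEUE_HIGHEST_PRIORITY, matching the assertions in the removed test class.
    lowRedundancyBlocks.add(block, 1, 0, 0, 3);

    // Formerly chooseUnderReplicatedBlocks(): returns one list of blocks per
    // priority level, highest priority first.
    List<List<BlockInfo>> chosen = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
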
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32d043d9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index 18f28d5..f04387d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -183,7 +183,7 @@ public class TestPendingReplication {
 
       PendingReplicationBlocks pendingReplications =
           blkManager.pendingReplications;
-      UnderReplicatedBlocks neededReplications = blkManager.neededReplications;
+      LowRedundancyBlocks neededReconstruction = blkManager.neededReconstruction;
       BlocksMap blocksMap = blkManager.blocksMap;
 
       //
@@ -227,9 +227,9 @@ public class TestPendingReplication {
       }
 
       //
-      // Verify that block moves to neededReplications
+      // Verify that block moves to neededReconstruction
       //
-      while (neededReplications.size() == 0) {
+      while (neededReconstruction.size() == 0) {
         try {
           Thread.sleep(100);
         } catch (Exception e) {
@@ -238,14 +238,14 @@ public class TestPendingReplication {
 
       // Verify that the generation stamp we will try to replicate
       // is now 1
-      for (Block b: neededReplications) {
+      for (Block b: neededReconstruction) {
         assertEquals("Generation stamp is 1 ", 1,
             b.getGenerationStamp());
       }
 
-      // Verify size of neededReplications is exactly 1.
-      assertEquals("size of neededReplications is 1 ", 1,
-          neededReplications.size());
+      // Verify size of neededReconstruction is exactly 1.
+      assertEquals("size of neededReconstruction is 1 ", 1,
+          neededReconstruction.size());
     } finally {
       if (cluster != null) {
         cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32d043d9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index be63d87..9f8985a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -836,12 +836,12 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
         .format(true).build();
     try {
       cluster.waitActive();
-      final UnderReplicatedBlocks neededReplications = cluster.getNameNode()
-          .getNamesystem().getBlockManager().neededReplications;
+      final LowRedundancyBlocks neededReconstruction = cluster.getNameNode()
+          .getNamesystem().getBlockManager().neededReconstruction;
       for (int i = 0; i < 100; i++) {
         // Adding the blocks directly to normal priority
 
-        neededReplications.add(genBlockInfo(ThreadLocalRandom.current().
+        neededReconstruction.add(genBlockInfo(ThreadLocalRandom.current().
             nextLong()), 2, 0, 0, 3);
       }
       // Lets wait for the replication interval, to start process normal
@@ -849,7 +849,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
       Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
       
       // Adding the block directly to high priority list
-      neededReplications.add(genBlockInfo(ThreadLocalRandom.current().
+      neededReconstruction.add(genBlockInfo(ThreadLocalRandom.current().
           nextLong()), 1, 0, 0, 3);
 
       // Lets wait for the replication interval
@@ -858,68 +858,68 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
       // Check replication completed successfully. Need not wait till it process
       // all the 100 normal blocks.
       assertFalse("Not able to clear the element from high priority list",
-          neededReplications.iterator(HIGH_PRIORITY).hasNext());
+          neededReconstruction.iterator(HIGH_PRIORITY).hasNext());
     } finally {
       cluster.shutdown();
     }
   }
   
   /**
-   * Test for the ChooseUnderReplicatedBlocks are processed based on priority
+   * Test for the ChooseLowRedundancyBlocks are processed based on priority
    */
   @Test
-  public void testChooseUnderReplicatedBlocks() throws Exception {
-    UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
+  public void testChooseLowRedundancyBlocks() throws Exception {
+    LowRedundancyBlocks lowRedundancyBlocks = new LowRedundancyBlocks();
 
     for (int i = 0; i < 5; i++) {
       // Adding QUEUE_HIGHEST_PRIORITY block
-      underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
+      lowRedundancyBlocks.add(genBlockInfo(ThreadLocalRandom.current().
           nextLong()), 1, 0, 0, 3);
 
-      // Adding QUEUE_VERY_UNDER_REPLICATED block
-      underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
+      // Adding QUEUE_VERY_LOW_REDUNDANCY block
+      lowRedundancyBlocks.add(genBlockInfo(ThreadLocalRandom.current().
           nextLong()), 2, 0, 0, 7);
 
       // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
-      underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
+      lowRedundancyBlocks.add(genBlockInfo(ThreadLocalRandom.current().
           nextLong()), 6, 0, 0, 6);
 
-      // Adding QUEUE_UNDER_REPLICATED block
-      underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
+      // Adding QUEUE_LOW_REDUNDANCY block
+      lowRedundancyBlocks.add(genBlockInfo(ThreadLocalRandom.current().
           nextLong()), 5, 0, 0, 6);
 
       // Adding QUEUE_WITH_CORRUPT_BLOCKS block
-      underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
+      lowRedundancyBlocks.add(genBlockInfo(ThreadLocalRandom.current().
           nextLong()), 0, 0, 0, 3);
     }
 
-    // Choose 6 blocks from UnderReplicatedBlocks. Then it should pick 5 blocks
-    // from
-    // QUEUE_HIGHEST_PRIORITY and 1 block from QUEUE_VERY_UNDER_REPLICATED.
+    // Choose 6 blocks from lowRedundancyBlocks. Then it should pick 5 blocks
+    // from QUEUE_HIGHEST_PRIORITY and 1 block from QUEUE_VERY_LOW_REDUNDANCY.
     List<List<BlockInfo>> chosenBlocks =
-        underReplicatedBlocks.chooseUnderReplicatedBlocks(6);
+        lowRedundancyBlocks.chooseLowRedundancyBlocks(6);
     assertTheChosenBlocks(chosenBlocks, 5, 1, 0, 0, 0);
 
-    // Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 4 blocks from
-    // QUEUE_VERY_UNDER_REPLICATED, 5 blocks from QUEUE_UNDER_REPLICATED and 1
+    // Choose 10 blocks from lowRedundancyBlocks. Then it should pick 4 blocks
+    // from QUEUE_VERY_LOW_REDUNDANCY, 5 blocks from QUEUE_LOW_REDUNDANCY and 1
     // block from QUEUE_REPLICAS_BADLY_DISTRIBUTED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(10);
     assertTheChosenBlocks(chosenBlocks, 0, 4, 5, 1, 0);
 
     // Adding QUEUE_HIGHEST_PRIORITY
-    underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
+    lowRedundancyBlocks.add(genBlockInfo(ThreadLocalRandom.current().
         nextLong()), 0, 1, 0, 3);
 
-    // Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 1 block from
-    // QUEUE_HIGHEST_PRIORITY, 4 blocks from QUEUE_REPLICAS_BADLY_DISTRIBUTED
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
+    // Choose 10 blocks from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_HIGHEST_PRIORITY, 4 blocks from
+    // QUEUE_REPLICAS_BADLY_DISTRIBUTED
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(10);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 4);
 
     // Since it is reached to end of all lists,
     // should start picking the blocks from start.
-    // Choose 7 blocks from UnderReplicatedBlocks. Then it should pick 6 blocks from
-    // QUEUE_HIGHEST_PRIORITY, 1 block from QUEUE_VERY_UNDER_REPLICATED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(7);
+    // Choose 7 blocks from lowRedundancyBlocks. Then it should pick 6 blocks
+    // from QUEUE_HIGHEST_PRIORITY, 1 block from QUEUE_VERY_LOW_REDUNDANCY.
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(7);
     assertTheChosenBlocks(chosenBlocks, 6, 1, 0, 0, 0);
   }
   
@@ -1268,45 +1268,45 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
 
   @Test(timeout = 60000)
   public void testUpdateDoesNotCauseSkippedReplication() {
-    UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
+    LowRedundancyBlocks lowRedundancyBlocks = new LowRedundancyBlocks();
 
     BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
     BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
     BlockInfo block3 = genBlockInfo(ThreadLocalRandom.current().nextLong());
 
-    // Adding QUEUE_VERY_UNDER_REPLICATED block
+    // Adding QUEUE_VERY_LOW_REDUNDANCY block
     final int block1CurReplicas = 2;
     final int block1ExpectedReplicas = 7;
-    underReplicatedBlocks.add(block1, block1CurReplicas, 0, 0,
+    lowRedundancyBlocks.add(block1, block1CurReplicas, 0, 0,
         block1ExpectedReplicas);
 
-    // Adding QUEUE_VERY_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block2, 2, 0, 0, 7);
+    // Adding QUEUE_VERY_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block2, 2, 0, 0, 7);
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block3, 2, 0, 0, 6);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block3, 2, 0, 0, 6);
 
     List<List<BlockInfo>> chosenBlocks;
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_LOW_REDUNDANCY.
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
 
     // Increasing the replications will move the block down a
     // priority.  This simulates a replica being completed in between checks.
-    underReplicatedBlocks.update(block1, block1CurReplicas+1, 0, 0,
+    lowRedundancyBlocks.update(block1, block1CurReplicas+1, 0, 0,
         block1ExpectedReplicas, 1, 0);
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_LOW_REDUNDANCY.
     // This block was moved up a priority and should not be skipped over.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_UNDER_REPLICATED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_LOW_REDUNDANCY.
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 0, 0, 1, 0, 0);
   }
 
@@ -1317,27 +1317,27 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
-    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+    LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
 
     BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
     BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block1, 0, 0, 1, 1);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block2, 0, 0, 1, 1);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
 
     List<List<BlockInfo>> chosenBlocks;
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_LOW_REDUNDANCY.
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
 
-    // Adding this block collection to the BlockManager, so that when we add the
+    // Adding this block collection to the BlockManager, so that when we add
     // block under construction, the BlockManager will realize the expected
-    // replication has been achieved and remove it from the under-replicated
+    // replication has been achieved and remove it from the low redundancy
     // queue.
     BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
     info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
@@ -1353,9 +1353,9 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
         ReplicaState.FINALIZED), storages[0]);
 
     // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
+    // from QUEUE_VERY_LOW_REDUNDANCY.
     // This block remains and should not be skipped over.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
   }
 
@@ -1367,7 +1367,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     when(mockNS.hasWriteLock()).thenReturn(true);
 
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
-    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+    LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
 
     long blkID1 = ThreadLocalRandom.current().nextLong();
     if (blkID1 < 0) {
@@ -1381,17 +1381,17 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     BlockInfo block1 = genBlockInfo(blkID1);
     BlockInfo block2 = genBlockInfo(blkID2);
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block1, 0, 0, 1, 1);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block2, 0, 0, 1, 1);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
 
     List<List<BlockInfo>> chosenBlocks;
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_LOW_REDUNDANCY.
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
 
     final BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
@@ -1425,10 +1425,10 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
 
     bm.convertLastBlockToUnderConstruction(mbc, 0L);
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_LOW_REDUNDANCY.
     // This block remains and should not be skipped over.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
   }
 
@@ -1439,30 +1439,30 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     when(mockNS.hasReadLock()).thenReturn(true);
 
     BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
-    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+    LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
 
     BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
     BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block1, 0, 0, 1, 1);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
 
-    // Adding QUEUE_UNDER_REPLICATED block
-    underReplicatedBlocks.add(block2, 0, 0, 1, 1);
+    // Adding QUEUE_LOW_REDUNDANCY block
+    lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
 
     List<List<BlockInfo>> chosenBlocks;
 
-    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_LOW_REDUNDANCY.
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
 
     bm.setReplication((short)0, (short)1, block1);
 
     // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
-    // from QUEUE_VERY_UNDER_REPLICATED.
+    // from QUEUE_VERY_LOW_REDUNDANCY.
     // This block remains and should not be skipped over.
-    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32d043d9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
deleted file mode 100644
index b228d09..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import java.util.Iterator;
-
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-public class TestUnderReplicatedBlockQueues {
-
-  private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
-
-  private BlockInfo genBlockInfo(long id) {
-    return new BlockInfoContiguous(new Block(id), (short) 3);
-  }
-
-  private BlockInfo genStripedBlockInfo(long id, long numBytes) {
-    BlockInfoStriped sblk =  new BlockInfoStriped(new Block(id), ecPolicy);
-    sblk.setNumBytes(numBytes);
-    return sblk;
-  }
-
-  /**
-   * Test that adding blocks with different replication counts puts them
-   * into different queues
-   * @throws Throwable if something goes wrong
-   */
-  @Test
-  public void testBlockPriorities() throws Throwable {
-    UnderReplicatedBlocks queues = new UnderReplicatedBlocks();
-    BlockInfo block1 = genBlockInfo(1);
-    BlockInfo block2 = genBlockInfo(2);
-    BlockInfo block_very_under_replicated = genBlockInfo(3);
-    BlockInfo block_corrupt = genBlockInfo(4);
-    BlockInfo block_corrupt_repl_one = genBlockInfo(5);
-
-    //add a block with a single entry
-    assertAdded(queues, block1, 1, 0, 3);
-
-    assertEquals(1, queues.getUnderReplicatedBlockCount());
-    assertEquals(1, queues.size());
-    assertInLevel(queues, block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
-    //repeated additions fail
-    assertFalse(queues.add(block1, 1, 0, 0, 3));
-
-    //add a second block with two replicas
-    assertAdded(queues, block2, 2, 0, 3);
-    assertEquals(2, queues.getUnderReplicatedBlockCount());
-    assertEquals(2, queues.size());
-    assertInLevel(queues, block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
-    //now try to add a block that is corrupt
-    assertAdded(queues, block_corrupt, 0, 0, 3);
-    assertEquals(3, queues.size());
-    assertEquals(2, queues.getUnderReplicatedBlockCount());
-    assertEquals(1, queues.getCorruptBlockSize());
-    assertInLevel(queues, block_corrupt,
-                  UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
-
-    //insert a very under-replicated block
-    assertAdded(queues, block_very_under_replicated, 4, 0, 25);
-    assertInLevel(queues, block_very_under_replicated,
-                  UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
-
-    //insert a corrupt block with replication factor 1
-    assertAdded(queues, block_corrupt_repl_one, 0, 0, 1);
-    assertEquals(2, queues.getCorruptBlockSize());
-    assertEquals(1, queues.getCorruptReplOneBlockSize());
-    queues.update(block_corrupt_repl_one, 0, 0, 0, 3, 0, 2);
-    assertEquals(0, queues.getCorruptReplOneBlockSize());
-    queues.update(block_corrupt, 0, 0, 0, 1, 0, -2);
-    assertEquals(1, queues.getCorruptReplOneBlockSize());
-    queues.update(block_very_under_replicated, 0, 0, 0, 1, -4, -24);
-    assertEquals(2, queues.getCorruptReplOneBlockSize());
-  }
-
-  @Test
-  public void testStripedBlockPriorities() throws Throwable {
-    int dataBlkNum = ecPolicy.getNumDataUnits();
-    int parityBlkNUm = ecPolicy.getNumParityUnits();
-    doTestStripedBlockPriorities(1, parityBlkNUm);
-    doTestStripedBlockPriorities(dataBlkNum, parityBlkNUm);
-  }
-
-  private void doTestStripedBlockPriorities(int dataBlkNum, int parityBlkNum)
-      throws Throwable {
-    int groupSize = dataBlkNum + parityBlkNum;
-    long numBytes = ecPolicy.getCellSize() * dataBlkNum;
-    UnderReplicatedBlocks queues = new UnderReplicatedBlocks();
-    int numUR = 0;
-    int numCorrupt = 0;
-
-    // add under replicated blocks
-    for (int i = 0; dataBlkNum + i < groupSize; i++) {
-      BlockInfo block = genStripedBlockInfo(-100 - 100 * i, numBytes);
-      assertAdded(queues, block, dataBlkNum + i, 0, groupSize);
-      numUR++;
-      assertEquals(numUR, queues.getUnderReplicatedBlockCount());
-      assertEquals(numUR + numCorrupt, queues.size());
-      if (i == 0) {
-        assertInLevel(queues, block,
-            UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
-      } else if (i * 3 < parityBlkNum + 1) {
-        assertInLevel(queues, block,
-            UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
-      } else {
-        assertInLevel(queues, block,
-            UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
-      }
-    }
-
-    // add a corrupted block
-    BlockInfo block_corrupt = genStripedBlockInfo(-10, numBytes);
-    assertEquals(numCorrupt, queues.getCorruptBlockSize());
-    assertAdded(queues, block_corrupt, dataBlkNum - 1, 0, groupSize);
-    numCorrupt++;
-    assertEquals(numUR + numCorrupt, queues.size());
-    assertEquals(numUR, queues.getUnderReplicatedBlockCount());
-    assertEquals(numCorrupt, queues.getCorruptBlockSize());
-    assertInLevel(queues, block_corrupt,
-        UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
-  }
-
-  private void assertAdded(UnderReplicatedBlocks queues,
-                           BlockInfo block,
-                           int curReplicas,
-                           int decomissionedReplicas,
-                           int expectedReplicas) {
-    assertTrue("Failed to add " + block,
-               queues.add(block,
-                          curReplicas, 0,
-                          decomissionedReplicas,
-                          expectedReplicas));
-  }
-
-  /**
-   * Determine whether or not a block is in a level without changing the API.
-   * Instead get the per-level iterator and run though it looking for a match.
-   * If the block is not found, an assertion is thrown.
-   *
-   * This is inefficient, but this is only a test case.
-   * @param queues queues to scan
-   * @param block block to look for
-   * @param level level to select
-   */
-  private void assertInLevel(UnderReplicatedBlocks queues,
-                             Block block,
-                             int level) {
-    final Iterator<BlockInfo> bi = queues.iterator(level);
-    while (bi.hasNext()) {
-      Block next = bi.next();
-      if (block.equals(next)) {
-        return;
-      }
-    }
-    fail("Block " + block + " not found in level " + level);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32d043d9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 690812f..058ab8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -152,7 +152,7 @@ public class TestMetaSave {
       line = reader.readLine();
       assertTrue(line.equals("Dead Datanodes: 1"));
       line = reader.readLine();
-      assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+      assertTrue(line.equals("Metasave: Blocks waiting for reconstruction: 0"));
       line = reader.readLine();
       assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
       line = reader.readLine();

