hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From l..@apache.org
Subject hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).
Date Thu, 05 Apr 2018 17:08:33 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f2d89d7bf -> 9fe337096


HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped
block. (Contributed by Lei (Eddy) Xu).

(cherry picked from commit 1cbf23df145af01692b8aaa438642b64e330cd05)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fe33709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fe33709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fe33709

Branch: refs/heads/branch-3.0
Commit: 9fe337096dbf5887456c04657ab80a85d4943383
Parents: f2d89d7
Author: Lei Xu <lei@apache.org>
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu <lei@apache.org>
Committed: Thu Apr 5 09:54:15 2018 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockIdManager.java  | 17 ++++++
 .../server/blockmanagement/BlockManager.java    |  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java     | 35 +++++------
 .../blockmanagement/InvalidateBlocks.java       | 13 +++--
 .../blockmanagement/TestBlockManager.java       | 61 ++++++++++++++++----
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++++++++++-----
 8 files changed, 136 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 8463023..7fcd698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -237,6 +237,23 @@ public class BlockIdManager {
     legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative, which should not be
+   * considered a striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+    return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this function should not be used alone
+   * to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
     return BlockType.fromBlockId(id) == STRIPED;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9b8f74c..8761d31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -450,7 +450,8 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
     invalidateBlocks = new InvalidateBlocks(
         datanodeManager.getBlockInvalidateLimit(),
-        startupDelayBlockDeletionInMs);
+        startupDelayBlockDeletionInMs,
+        blockIdManager);
 
     // Compute the map capacity by allocating 2% of total memory
     blocksMap = new BlocksMap(
@@ -1667,7 +1668,7 @@ public class BlockManager implements BlockStatsMXBean {
       corrupted.setBlockId(b.getStored().getBlockId());
     }
     corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-        b.getReasonCode());
+        b.getReasonCode(), b.getStored().isStriped());
 
     NumberReplicas numberOfReplicas = countNodes(b.getStored());
     boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index daa3d8b..8de17ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -486,7 +486,7 @@ class BlockManagerSafeMode {
 
     if (!blockManager.getShouldPostponeBlocksFromFuture() &&
         !inRollBack && blockManager.isGenStampInFuture(brr)) {
-      if (BlockIdManager.isStripedBlockID(brr.getBlockId())) {
+      if (blockManager.getBlockIdManager().isStripedBlock(brr)) {
         bytesInFutureECBlockGroups.add(brr.getBytesOnDisk());
       } else {
         bytesInFutureBlocks.add(brr.getBytesOnDisk());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 6f13da9..a96c815 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -93,7 +93,7 @@ class BlocksMap {
    * remove it from all data-node lists it belongs to;
    * and remove all data-node locations associated with the block.
    */
-  void removeBlock(Block block) {
+  void removeBlock(BlockInfo block) {
     BlockInfo blockInfo = blocks.remove(block);
     if (blockInfo == null) {
       return;
@@ -175,7 +175,7 @@ class BlocksMap {
     if (info.hasNoStorage()    // no datanodes left
         && info.isDeleted()) { // does not belong to a file
       blocks.remove(b);  // remove block from the map
-      decrementBlockStat(b);
+      decrementBlockStat(info);
     }
     return removed;
   }
@@ -207,16 +207,16 @@ class BlocksMap {
     return capacity;
   }
 
-  private void incrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void incrementBlockStat(BlockInfo block) {
+    if (block.isStriped()) {
       totalECBlockGroups.increment();
     } else {
       totalReplicatedBlocks.increment();
     }
   }
 
-  private void decrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void decrementBlockStat(BlockInfo block) {
+    if (block.isStriped()) {
       totalECBlockGroups.decrement();
       assert totalECBlockGroups.longValue() >= 0 :
           "Total number of ec block groups should be non-negative";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 7a576bb..fe1224c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -69,12 +69,12 @@ public class CorruptReplicasMap{
    * @param reasonCode the enum representation of the reason
    */
   void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
-      String reason, Reason reasonCode) {
+      String reason, Reason reasonCode, boolean isStriped) {
     Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
     if (nodes == null) {
       nodes = new HashMap<DatanodeDescriptor, Reason>();
       corruptReplicasMap.put(blk, nodes);
-      incrementBlockStat(blk);
+      incrementBlockStat(isStriped);
     }
     
     String reasonText;
@@ -103,11 +103,11 @@ public class CorruptReplicasMap{
    * Remove Block from CorruptBlocksMap.
    * @param blk Block to be removed
    */
-  void removeFromCorruptReplicasMap(Block blk) {
+  void removeFromCorruptReplicasMap(BlockInfo blk) {
     if (corruptReplicasMap != null) {
       Map<DatanodeDescriptor, Reason> value = corruptReplicasMap.remove(blk);
       if (value != null) {
-        decrementBlockStat(blk);
+        decrementBlockStat(blk.isStriped());
       }
     }
   }
@@ -119,12 +119,13 @@ public class CorruptReplicasMap{
    * @return true if the removal is successful; 
              false if the replica is not in the map
    */ 
-  boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
+  boolean removeFromCorruptReplicasMap(
+      BlockInfo blk, DatanodeDescriptor datanode) {
     return removeFromCorruptReplicasMap(blk, datanode, Reason.ANY);
   }
 
-  boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
-      Reason reason) {
+  boolean removeFromCorruptReplicasMap(
+      BlockInfo blk, DatanodeDescriptor datanode, Reason reason) {
     Map <DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
     if (datanodes == null) {
       return false;
@@ -141,23 +142,23 @@ public class CorruptReplicasMap{
       if (datanodes.isEmpty()) {
         // remove the block if there is no more corrupted replicas
         corruptReplicasMap.remove(blk);
-        decrementBlockStat(blk);
+        decrementBlockStat(blk.isStriped());
       }
       return true;
     }
     return false;
   }
 
-  private void incrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void incrementBlockStat(boolean isStriped) {
+    if (isStriped) {
       totalCorruptECBlockGroups.increment();
     } else {
       totalCorruptBlocks.increment();
     }
   }
 
-  private void decrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void decrementBlockStat(boolean isStriped) {
+    if (isStriped) {
       totalCorruptECBlockGroups.decrement();
     } else {
       totalCorruptBlocks.decrement();
@@ -205,6 +206,8 @@ public class CorruptReplicasMap{
    * is null, up to numExpectedBlocks blocks are returned from the beginning.
    * If startingBlockId cannot be found, null is returned.
    *
+   * @param bim BlockIdManager to determine the block type.
+   * @param blockType desired block type to return.
    * @param numExpectedBlocks Number of block ids to return.
    *  0 <= numExpectedBlocks <= 100
    * @param startingBlockId Block id from which to start. If null, start at
@@ -212,7 +215,7 @@ public class CorruptReplicasMap{
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
    */
   @VisibleForTesting
-  long[] getCorruptBlockIdsForTesting(BlockType blockType,
+  long[] getCorruptBlockIdsForTesting(BlockIdManager bim, BlockType blockType,
       int numExpectedBlocks, Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
@@ -223,11 +226,9 @@ public class CorruptReplicasMap{
         .stream()
         .filter(r -> {
           if (blockType == BlockType.STRIPED) {
-            return BlockIdManager.isStripedBlockID(r.getBlockId()) &&
-                r.getBlockId() >= cursorBlockId;
+            return bim.isStripedBlock(r) && r.getBlockId() >= cursorBlockId;
           } else {
-            return !BlockIdManager.isStripedBlockID(r.getBlockId()) &&
-                r.getBlockId() >= cursorBlockId;
+            return !bim.isStripedBlock(r) && r.getBlockId() >= cursorBlockId;
           }
         })
         .sorted()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index ff68c02..7c83792 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -57,6 +57,7 @@ class InvalidateBlocks {
   private final LongAdder numBlocks = new LongAdder();
   private final LongAdder numECBlocks = new LongAdder();
   private final int blockInvalidateLimit;
+  private final BlockIdManager blockIdManager;
 
   /**
    * The period of pending time for block invalidation since the NameNode
@@ -66,9 +67,11 @@ class InvalidateBlocks {
   /** the startup time */
   private final long startupTime = Time.monotonicNow();
 
-  InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs) {
+  InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs,
+                   final BlockIdManager blockIdManager) {
     this.blockInvalidateLimit = blockInvalidateLimit;
     this.pendingPeriodInMs = pendingPeriodInMs;
+    this.blockIdManager = blockIdManager;
     printBlockDeletionTime(BlockManager.LOG);
   }
 
@@ -124,7 +127,7 @@ class InvalidateBlocks {
 
   private LightWeightHashSet<Block> getBlocksSet(final DatanodeInfo dn,
       final Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+    if (blockIdManager.isStripedBlock(block)) {
       return getECBlocksSet(dn);
     } else {
       return getBlocksSet(dn);
@@ -133,7 +136,7 @@ class InvalidateBlocks {
 
   private void putBlocksSet(final DatanodeInfo dn, final Block block,
       final LightWeightHashSet set) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+    if (blockIdManager.isStripedBlock(block)) {
       assert getECBlocksSet(dn) == null;
       nodeToECBlocks.put(dn, set);
     } else {
@@ -178,7 +181,7 @@ class InvalidateBlocks {
       putBlocksSet(datanode, block, set);
     }
     if (set.add(block)) {
-      if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      if (blockIdManager.isStripedBlock(block)) {
         numECBlocks.increment();
       } else {
         numBlocks.increment();
@@ -206,7 +209,7 @@ class InvalidateBlocks {
   synchronized void remove(final DatanodeInfo dn, final Block block) {
     final LightWeightHashSet<Block> v = getBlocksSet(dn, block);
     if (v != null && v.remove(block)) {
-      if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      if (blockIdManager.isStripedBlock(block)) {
         numECBlocks.decrement();
       } else {
         numBlocks.decrement();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f63be91..5219a44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -114,10 +116,13 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestBlockManager {
   private DatanodeStorageInfo[] storages;
@@ -1343,14 +1348,14 @@ public class TestBlockManager {
     spyBM.createLocatedBlocks(new BlockInfo[]{blockInfo}, 3L, false, 0L, 3L,
         false, false, null, null);
     verify(spyBM, Mockito.atLeast(0)).
-        isReplicaCorrupt(Mockito.any(BlockInfo.class),
-            Mockito.any(DatanodeDescriptor.class));
+        isReplicaCorrupt(any(BlockInfo.class),
+            any(DatanodeDescriptor.class));
     addCorruptBlockOnNodes(0, origNodes);
     spyBM.createLocatedBlocks(new BlockInfo[]{blockInfo}, 3L, false, 0L, 3L,
         false, false, null, null);
     verify(spyBM, Mockito.atLeast(1)).
-        isReplicaCorrupt(Mockito.any(BlockInfo.class),
-            Mockito.any(DatanodeDescriptor.class));
+        isReplicaCorrupt(any(BlockInfo.class),
+            any(DatanodeDescriptor.class));
   }
 
   @Test (timeout = 300000)
@@ -1506,8 +1511,8 @@ public class TestBlockManager {
         blockInfo.getGenerationStamp() + 1,
         blockInfo.getNumBytes(),
         new DatanodeStorageInfo[]{});
-    BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
-    Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+    BlockCollection mockedBc = mock(BlockCollection.class);
+    when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
     bm.checkRedundancy(mockedBc);
     return blockInfo;
   }
@@ -1524,8 +1529,8 @@ public class TestBlockManager {
     Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     nodesList.get(0).setInMaintenance();
-    BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
-    Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+    BlockCollection mockedBc = mock(BlockCollection.class);
+    when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
     bm.checkRedundancy(mockedBc);
     return blockInfo;
   }
@@ -1580,8 +1585,8 @@ public class TestBlockManager {
     Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     nodesList.get(0).startDecommission();
-    BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
-    Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+    BlockCollection mockedBc = mock(BlockCollection.class);
+    when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
     bm.checkRedundancy(mockedBc);
     return blockInfo;
   }
@@ -1623,4 +1628,40 @@ public class TestBlockManager {
     }
   }
 
+  @Test
+  public void testLegacyBlockInInvalidateBlocks() {
+    final long legancyGenerationStampLimit = 10000;
+    BlockIdManager bim = Mockito.mock(BlockIdManager.class);
+
+    when(bim.getLegacyGenerationStampLimit())
+        .thenReturn(legancyGenerationStampLimit);
+    when(bim.isStripedBlock(any(Block.class))).thenCallRealMethod();
+    when(bim.isLegacyBlock(any(Block.class))).thenCallRealMethod();
+
+    InvalidateBlocks ibs = new InvalidateBlocks(100, 30000, bim);
+
+    Block legacy = new Block(-1, 10, legancyGenerationStampLimit / 10);
+    Block striped = new Block(
+        bm.nextBlockId(BlockType.STRIPED), 10,
+        legancyGenerationStampLimit + 10);
+
+    DatanodeInfo legacyDnInfo = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo stripedDnInfo = DFSTestUtil.getLocalDatanodeInfo();
+
+    ibs.add(legacy, legacyDnInfo, false);
+    assertEquals(1, ibs.getBlocks());
+    assertEquals(0, ibs.getECBlocks());
+
+    ibs.add(striped, stripedDnInfo, false);
+    assertEquals(1, ibs.getBlocks());
+    assertEquals(1, ibs.getECBlocks());
+
+    ibs.remove(legacyDnInfo);
+    assertEquals(0, ibs.getBlocks());
+    assertEquals(1, ibs.getECBlocks());
+
+    ibs.remove(stripedDnInfo);
+    assertEquals(0, ibs.getBlocks());
+    assertEquals(0, ibs.getECBlocks());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 3510bc3..299df56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -30,10 +32,12 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 
 /**
@@ -46,27 +50,31 @@ public class TestCorruptReplicaInfo {
   
   private static final Log LOG = LogFactory.getLog(
       TestCorruptReplicaInfo.class);
-  private final Map<Long, Block> replicaMap = new HashMap<>();
-  private final Map<Long, Block> stripedBlocksMap = new HashMap<>();
+  private final Map<Long, BlockInfo> replicaMap = new HashMap<>();
+  private final Map<Long, BlockInfo> stripedBlocksMap = new HashMap<>();
 
   // Allow easy block creation by block id. Return existing
   // replica block if one with same block id already exists.
-  private Block getReplica(Long blockId) {
+  private BlockInfo getReplica(Long blockId) {
     if (!replicaMap.containsKey(blockId)) {
-      replicaMap.put(blockId, new Block(blockId, 0, 0));
+      short replFactor = 3;
+      replicaMap.put(blockId,
+          new BlockInfoContiguous(new Block(blockId, 0, 0), replFactor));
     }
     return replicaMap.get(blockId);
   }
 
-  private Block getReplica(int blkId) {
+  private BlockInfo getReplica(int blkId) {
     return getReplica(Long.valueOf(blkId));
   }
 
-  private Block getStripedBlock(int blkId) {
+  private BlockInfo getStripedBlock(int blkId) {
     Long stripedBlockId = (1L << 63) + blkId;
     assertTrue(BlockIdManager.isStripedBlockID(stripedBlockId));
     if (!stripedBlocksMap.containsKey(stripedBlockId)) {
-      stripedBlocksMap.put(stripedBlockId, new Block(stripedBlockId, 1024, 0));
+      stripedBlocksMap.put(stripedBlockId,
+          new BlockInfoStriped(new Block(stripedBlockId, 1024, 0),
+              StripedFileTestUtil.getDefaultECPolicy()));
     }
     return stripedBlocksMap.get(stripedBlockId);
   }
@@ -88,6 +96,10 @@ public class TestCorruptReplicaInfo {
   public void testCorruptReplicaInfo()
       throws IOException, InterruptedException {
     CorruptReplicasMap crm = new CorruptReplicasMap();
+    BlockIdManager bim = Mockito.mock(BlockIdManager.class);
+    when(bim.isLegacyBlock(any(Block.class))).thenReturn(false);
+    when(bim.isStripedBlock(any(Block.class))).thenCallRealMethod();
+    assertTrue(!bim.isLegacyBlock(new Block(-1)));
 
     // Make sure initial values are returned correctly
     assertEquals("Total number of corrupt blocks must initially be 0!",
@@ -97,10 +109,11 @@ public class TestCorruptReplicaInfo {
     assertEquals("Number of corrupt striped block groups must initially be 0!",
         0, crm.getCorruptECBlockGroups());
     assertNull("Param n cannot be less than 0",
-        crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, -1, null));
+        crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, -1, null));
     assertNull("Param n cannot be greater than 100",
-        crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 101, null));
-    long[] l = crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 0, null);
+        crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, 101, null));
+    long[] l = crm.getCorruptBlockIdsForTesting(
+        bim, BlockType.CONTIGUOUS, 0, null);
     assertNotNull("n = 0 must return non-null", l);
     assertEquals("n = 0 must return an empty list", 0, l.length);
 
@@ -156,22 +169,25 @@ public class TestCorruptReplicaInfo {
         2 * blockCount, crm.size());
     assertTrue("First five corrupt replica blocks ids are not right!",
         Arrays.equals(Arrays.copyOfRange(replicaIds, 0, 5),
-            crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 5, null)));
+            crm.getCorruptBlockIdsForTesting(
+                bim, BlockType.CONTIGUOUS, 5, null)));
     assertTrue("First five corrupt striped blocks ids are not right!",
         Arrays.equals(Arrays.copyOfRange(stripedIds, 0, 5),
-            crm.getCorruptBlockIdsForTesting(BlockType.STRIPED, 5, null)));
+            crm.getCorruptBlockIdsForTesting(
+                bim, BlockType.STRIPED, 5, null)));
 
     assertTrue("10 replica blocks after 7 not returned correctly!",
         Arrays.equals(Arrays.copyOfRange(replicaIds, 7, 17),
-            crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 10, 7L)));
+            crm.getCorruptBlockIdsForTesting(
+                bim, BlockType.CONTIGUOUS, 10, 7L)));
     assertTrue("10 striped blocks after 7 not returned correctly!",
         Arrays.equals(Arrays.copyOfRange(stripedIds, 7, 17),
-            crm.getCorruptBlockIdsForTesting(BlockType.STRIPED,
+            crm.getCorruptBlockIdsForTesting(bim, BlockType.STRIPED,
                 10, getStripedBlock(7).getBlockId())));
   }
   
   private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
-      Block blk, DatanodeDescriptor dn) {
-    crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
+      BlockInfo blk, DatanodeDescriptor dn) {
+    crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE, blk.isStriped());
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message