hadoop-common-commits mailing list archives

From kih...@apache.org
Subject hadoop git commit: HDFS-11755. Underconstruction blocks can be considered missing. Contributed by Nathan Roberts.
Date Wed, 10 May 2017 19:23:34 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ef120bed7 -> a8f209dee


HDFS-11755. Underconstruction blocks can be considered missing. Contributed by Nathan Roberts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8f209de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8f209de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8f209de

Branch: refs/heads/branch-2.8
Commit: a8f209dee781259ef053c8d3c882d8b57799cef8
Parents: ef120be
Author: Kihwal Lee <kihwal@apache.org>
Authored: Wed May 10 14:22:17 2017 -0500
Committer: Kihwal Lee <kihwal@apache.org>
Committed: Wed May 10 14:22:17 2017 -0500

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  2 +-
 .../hdfs/server/namenode/CacheManager.java      |  3 +-
 .../apache/hadoop/hdfs/TestFileCorruption.java  |  1 +
 .../blockmanagement/TestBlockManager.java       | 72 ++++++++++++++++++--
 4 files changed, 68 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f209de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6ff8a62..7ade4ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3514,7 +3514,7 @@ public class BlockManager implements BlockStatsMXBean {
       final int curReplicasDelta, int expectedReplicasDelta) {
     namesystem.writeLock();
     try {
-      if (!isPopulatingReplQueues()) {
+      if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
       }
       NumberReplicas repl = countNodes(block);
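
The one-line guard above is the heart of the fix: updateNeededReplications now
returns early for any block that is not yet complete, so a block still under
construction can never be queued as under-replicated or reported as missing.
A minimal sketch of the guard pattern, with hypothetical names (SimpleBlock
and ReplicationQueueSketch are illustrative, not HDFS classes):

    // Hypothetical sketch of the early-exit guard; names are illustrative.
    enum BlockUCState { UNDER_CONSTRUCTION, COMMITTED, COMPLETE }

    class SimpleBlock {
      private final BlockUCState state;
      SimpleBlock(BlockUCState state) { this.state = state; }
      boolean isComplete() { return state == BlockUCState.COMPLETE; }
    }

    class ReplicationQueueSketch {
      void updateNeededReplications(SimpleBlock block, boolean populating) {
        // A block still being written legitimately has fewer live replicas
        // than its target, so only COMPLETE blocks are considered for the
        // under-replicated (and hence "missing") queues.
        if (!populating || !block.isComplete()) {
          return;
        }
        // ... count live replicas and enqueue if under-replicated ...
      }
    }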

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f209de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 12e64f6..01f70b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -100,7 +100,7 @@ import com.google.common.collect.Lists;
  * caching directives, we will schedule caching and uncaching work.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class CacheManager {
+public class CacheManager {
   public static final Logger LOG = LoggerFactory.getLogger(CacheManager.class);
 
   private static final float MIN_CACHED_BLOCKS_PERCENT = 0.001f;
@@ -188,7 +188,6 @@ public final class CacheManager {
       this.directives = directives;
     }
   }
-
   CacheManager(FSNamesystem namesystem, Configuration conf,
       BlockManager blockManager) {
     this.namesystem = namesystem;
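
Dropping the final modifier looks cosmetic but is what makes the new test
setup below possible: Mockito's default mock maker generates a runtime
subclass of the mocked type, which is impossible for a final class. A minimal
sketch of the resulting setup, mirroring the test diff (assumes the Mockito
version on branch-2.8, which cannot mock final classes):

    import org.apache.hadoop.hdfs.server.namenode.CacheManager;
    import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
    import org.apache.hadoop.util.GSet;
    import org.apache.hadoop.util.LightWeightGSet;
    import org.mockito.Mockito;

    class CacheManagerMockSketch {
      // Mocking CacheManager would fail while the class was still final;
      // after this change the mock can stand in for the real manager.
      static CacheManager mockCacheManager() {
        CacheManager cm = Mockito.mock(CacheManager.class);
        GSet<CachedBlock, CachedBlock> cb =
            new LightWeightGSet<CachedBlock, CachedBlock>(1);
        // BlockManager code paths expect a non-null cached-blocks set.
        Mockito.when(cm.getCachedBlocks()).thenReturn(cb);
        return cm;
      }
    }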

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f209de/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 5477700..59ba376 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -274,6 +274,7 @@ public class TestFileCorruption {
       out.write(outBuffer, 0, bufferSize);
       out.close();
       dfs.setReplication(filePath, (short) 10);
+      cluster.triggerBlockReports();
       // underreplicated Blocks should be one after setrep
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override public Boolean get() {
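
With the new guard, the NameNode only evaluates a block's replication once it
is complete and reported, so the test now triggers block reports explicitly
before polling. GenericTestUtils.waitFor retries a boolean condition until it
holds or a timeout expires; the hunk is truncated above, so the condition in
this sketch is an assumption, not the test's actual check:

    // Sketch of the polling idiom; the condition below is assumed, since
    // the actual Supplier body is truncated in the hunk above.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override public Boolean get() {
        // assumed check: one under-replicated block after setrep to 10
        return cluster.getNamesystem().getUnderReplicatedBlocks() == 1;
      }
    }, 100, 60000);  // poll every 100 ms, time out after 60 s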

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f209de/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f4de968..b846658 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -78,11 +78,15 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
+import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -97,6 +101,8 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.util.GSet;
+import org.apache.hadoop.util.LightWeightGSet;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.Assert;
@@ -141,6 +147,19 @@ public class TestBlockManager {
     Mockito.doReturn(true).when(fsn).hasReadLock();
     Mockito.doReturn(true).when(fsn).isRunning();
     bm = new BlockManager(fsn, conf);
+    // Make shouldPopulateReplQueues return true
+    HAContext haContext = Mockito.mock(HAContext.class);
+    HAState haState = Mockito.mock(HAState.class);
+    Mockito.when(haContext.getState()).thenReturn(haState);
+    Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
+    Mockito.when(fsn.getHAContext()).thenReturn(haContext);
+    bm.setInitializedReplQueues(true);
+    CacheManager cm = Mockito.mock(CacheManager.class);
+    Mockito.doReturn(cm).when(fsn).getCacheManager();
+    GSet<CachedBlock, CachedBlock> cb =
+        new LightWeightGSet<CachedBlock, CachedBlock>(1);
+    Mockito.when(cm.getCachedBlocks()).thenReturn(cb);
+
     final String[] racks = {
         "/rackA",
         "/rackA",
@@ -515,7 +534,7 @@ public class TestBlockManager {
     }
     return ret;
   }
-  
+
   private List<DatanodeDescriptor> startDecommission(int ... indexes) {
     List<DatanodeDescriptor> nodes = getNodes(indexes);
     for (DatanodeDescriptor node : nodes) {
@@ -848,6 +867,42 @@ public class TestBlockManager {
         (ds) >= 0);
   }
 
+  @Test
+  public void testUCBlockNotConsideredMissing() throws Exception {
+    DatanodeDescriptor node = nodes.get(0);
+    DatanodeStorageInfo ds = node.getStorageInfos()[0];
+    node.setAlive(true);
+    DatanodeRegistration nodeReg =
+        new DatanodeRegistration(node, null, null, "");
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node);
+
+    // Build an incremental report
+    List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
+
+    // blk_42 is under construction, finalizes on one node and is
+    // immediately deleted on same node
+    long blockId = 42;  // arbitrary
+    BlockInfo receivedBlock = addUcBlockToBM(blockId);
+
+    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
+    rdbiList.add(new ReceivedDeletedBlockInfo(
+        new Block(blockId),
+        ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
+
+    // process IBR
+    StorageReceivedDeletedBlocks srdb =
+        new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
+            rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
+    bm.processIncrementalBlockReport(node, srdb);
+    // Needed replications should still be 0.
+    assertEquals("UC block was incorrectly added to needed Replications",
+        0, bm.neededReplications.size());
+  }
+
   private BlockInfo addBlockToBM(long blkId) {
     Block block = new Block(blkId);
     BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
@@ -1180,14 +1235,17 @@ public class TestBlockManager {
     FileInputStream fstream = new FileInputStream(file);
     DataInputStream in = new DataInputStream(fstream);
     BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+    String corruptBlocksLine;
+    boolean foundIt = false;
     try {
-      for(int i =0;i<6;i++) {
-        reader.readLine();
+      while ((corruptBlocksLine = reader.readLine()) != null) {
+        if (corruptBlocksLine.compareTo("Corrupt Blocks:") == 0) {
+          foundIt = true;
+          break;
+        }
       }
-      String corruptBlocksLine = reader.readLine();
-      assertEquals("Unexpected text in metasave," +
-              "was expecting corrupt blocks section!", 0,
-          corruptBlocksLine.compareTo("Corrupt Blocks:"));
+      assertTrue("Unexpected text in metasave, " +
+              "was expecting corrupt blocks section!", foundIt);
       corruptBlocksLine = reader.readLine();
       String regex = "Block=[0-9]+\\tNode=.*\\tStorageID=.*StorageState.*" +
           "TotalReplicas=.*Reason=GENSTAMP_MISMATCH";

