hadoop-common-commits mailing list archives

From: s..@apache.org
Subject: hadoop git commit: Revert "HDFS-10301. Interleaving processing of storages from repeated block reports causes false zombie storage detection, removes valid blocks. Contributed by Vinitha Gankidi."
Date: Tue, 02 Aug 2016 17:57:12 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 a76eb7fa3 -> 0c3282b35


Revert "HDFS-10301. Interleaving processing of storages from repeated block reports causes
false zombie storage detection, removes valid blocks. Contributed by Vinitha Gankidi."

This reverts commit a76eb7fa34b5932a4eaceeaf9a7df0f68462b2a1.
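
For context before the diff: the reverted change (HDFS-10301) replaced the NameNode's reportId-stamp scheme for zombie-storage detection with an explicit enumeration of all storages in the last RPC of a split block report. Reverting it restores the stamp-based scheme. Below is a toy, self-contained model of that scheme (all names invented; this is not the Hadoop API) together with the interleaving hazard named in the subject line: when the per-storage RPCs of two overlapping full block reports interleave, a live storage can end up stamped with the newer report's id and then be pruned as a zombie by the older report.

----------------------------------------------------------------------
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

// Toy model, not Hadoop code: storages are stamped with the id of the last
// report that touched them; pruning removes any storage whose stamp differs
// from the current report id.
public class ZombieDetectionModel {
  static final Map<String, Long> lastReportId = new HashMap<>();

  static void processStorageReport(String storageId, long reportId) {
    lastReportId.put(storageId, reportId);  // stamp the storage
  }

  static List<String> removeZombies(long curReportId) {
    List<String> zombies = new ArrayList<>();
    Iterator<Map.Entry<String, Long>> it = lastReportId.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, Long> e = it.next();
      if (e.getValue() != curReportId) {  // stale stamp => treated as zombie
        zombies.add(e.getKey());
        it.remove();
      }
    }
    return zombies;
  }

  public static void main(String[] args) {
    // Two full reports, A (0x1) and B (0x2), covering storages s1 and s2,
    // arrive with their per-storage RPCs interleaved:
    processStorageReport("s1", 0x1);  // report A stamps s1
    processStorageReport("s1", 0x2);  // report B overwrites s1's stamp
    processStorageReport("s2", 0x1);  // report A stamps s2
    // Report A finishes and prunes with curReportId = 0x1:
    System.out.println(removeZombies(0x1));  // prints [s1]: a live storage
  }
}
----------------------------------------------------------------------

The reverted commit had avoided this by making the last RPC enumerate every storage explicitly (the STORAGE_REPORT machinery deleted in the hunks below) and comparing against that set instead of the stamps.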


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c3282b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c3282b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c3282b3

Branch: refs/heads/branch-2.7
Commit: 0c3282b35c4bce7dd4c12d593f2547c66f1fd891
Parents: a76eb7f
Author: Konstantin V Shvachko <shv@apache.org>
Authored: Tue Aug 2 10:53:06 2016 -0700
Committer: Konstantin V Shvachko <shv@apache.org>
Committed: Tue Aug 2 10:53:06 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 45 ------------
 .../server/blockmanagement/BlockManager.java    | 49 ++++++-------
 .../blockmanagement/DatanodeDescriptor.java     | 30 +++++++-
 .../blockmanagement/DatanodeStorageInfo.java    | 11 +++
 .../hdfs/server/datanode/BPServiceActor.java    | 33 ++-------
 .../hdfs/server/namenode/NameNodeRpcServer.java | 40 ++++-------
 .../blockmanagement/TestBlockManager.java       | 10 +--
 .../TestNameNodePrunesMissingStorages.java      | 74 --------------------
 ...TestDnRespectsBlockReportSplitThreshold.java | 33 +--------
 .../TestNNHandlesBlockReportPerStorage.java     | 32 ++-------
 10 files changed, 93 insertions(+), 264 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index ef8d178..1c89ee4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -57,34 +57,6 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
     public Iterator<BlockReportReplica> iterator() {
       return Collections.emptyIterator();
     }
-    @Override
-    public boolean isStorageReport() {
-      return false;
-    }
-  };
-
-  // STORAGE_REPORT is used to report all storages in the DN
-  public static final BlockListAsLongs STORAGE_REPORT = new BlockListAsLongs() {
-    @Override
-    public int getNumberOfBlocks() {
-      return -1;
-    }
-    @Override
-    public ByteString getBlocksBuffer() {
-      return ByteString.EMPTY;
-    }
-    @Override
-    public long[] getBlockListAsLongs() {
-      return EMPTY_LONGS;
-    }
-    @Override
-    public Iterator<BlockReportReplica> iterator() {
-      return Collections.emptyIterator();
-    }
-    @Override
-    public boolean isStorageReport() {
-      return true;
-    }
   };
 
   /**
@@ -201,13 +173,6 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
   abstract public long[] getBlockListAsLongs();
 
   /**
-   * Return true for STORAGE_REPORT BlocksListsAsLongs.
-   * Otherwise return false.
-   * @return boolean
-   */
-  abstract public boolean isStorageReport();
-
-  /**
    * Returns a singleton iterator over blocks in the block report.  Do not
    * add the returned blocks to a collection.
    * @return Iterator
@@ -341,11 +306,6 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
     }
 
     @Override
-    public boolean isStorageReport() {
-      return false;
-    }
-
-    @Override
     public Iterator<BlockReportReplica> iterator() {
       return new Iterator<BlockReportReplica>() {
         final BlockReportReplica block = new BlockReportReplica();
@@ -421,11 +381,6 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
     }
 
     @Override
-    public boolean isStorageReport() {
-      return false;
-    }
-
-    @Override
     public Iterator<BlockReportReplica> iterator() {
       return new Iterator<BlockReportReplica>() {
         private final BlockReportReplica block = new BlockReportReplica();
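
A reading aid for the hunk above: the deleted STORAGE_REPORT constant is a sentinel object. The hypothetical, compilable skeleton below (names shortened; ByteString and BlockReportReplica details omitted) shows how a -1 block count plus the isStorageReport() flag distinguishes "this storage exists, its replicas are reported in another RPC" from a genuinely empty report:

----------------------------------------------------------------------
import java.util.Collections;
import java.util.Iterator;

// Simplified stand-in for BlockListAsLongs; not the real class.
abstract class ReportSkeleton implements Iterable<Long> {
  public abstract int getNumberOfBlocks();
  public abstract boolean isStorageReport();

  // EMPTY: a real report that happens to contain no blocks.
  static final ReportSkeleton EMPTY = new ReportSkeleton() {
    public int getNumberOfBlocks() { return 0; }
    public boolean isStorageReport() { return false; }
    public Iterator<Long> iterator() { return Collections.emptyIterator(); }
  };

  // STORAGE_REPORT: carries no replicas at all; the -1 count and the flag
  // mark it as a placeholder rather than an empty storage.
  static final ReportSkeleton STORAGE_REPORT = new ReportSkeleton() {
    public int getNumberOfBlocks() { return -1; }
    public boolean isStorageReport() { return true; }
    public Iterator<Long> iterator() { return Collections.emptyIterator(); }
  };

  public static void main(String[] args) {
    System.out.println(EMPTY.getNumberOfBlocks());           // 0
    System.out.println(STORAGE_REPORT.getNumberOfBlocks());  // -1
    System.out.println(STORAGE_REPORT.isStorageReport());    // true
  }
}
----------------------------------------------------------------------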

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6531cda..984fef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -77,7 +77,6 @@ import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -1801,8 +1800,8 @@ public class BlockManager {
    */
   public boolean processReport(final DatanodeID nodeID,
       final DatanodeStorage storage,
-      final BlockListAsLongs newReport,
-      BlockReportContext context) throws IOException {
+      final BlockListAsLongs newReport, BlockReportContext context,
+      boolean lastStorageInRpc) throws IOException {
     namesystem.writeLock();
     final long startTime = Time.monotonicNow(); //after acquiring write lock
     final long endTime;
@@ -1842,9 +1841,27 @@ public class BlockManager {
       
       storageInfo.receivedBlockReport();
       if (context != null) {
-        LOG.debug("Processing RPC with index {} out of total {} RPCs in "
-                + "processReport 0x{}", context.getCurRpc(),
-            context.getTotalRpcs(), Long.toHexString(context.getReportId()));
+        storageInfo.setLastBlockReportId(context.getReportId());
+        if (lastStorageInRpc) {
+          int rpcsSeen = node.updateBlockReportContext(context);
+          if (rpcsSeen >= context.getTotalRpcs()) {
+            List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
+            if (zombies.isEmpty()) {
+              LOG.debug("processReport 0x{}: no zombie storages found.",
+                  Long.toHexString(context.getReportId()));
+            } else {
+              for (DatanodeStorageInfo zombie : zombies) {
+                removeZombieReplicas(context, zombie);
+              }
+            }
+            node.clearBlockReportContext();
+          } else {
+            LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
+                    "report.", Long.toHexString(context.getReportId()),
+                (context.getTotalRpcs() - rpcsSeen)
+            );
+          }
+        }
       }
     } finally {
       endTime = Time.monotonicNow();
@@ -1870,26 +1887,6 @@ public class BlockManager {
     return !node.hasStaleStorages();
   }
 
-  public void removeZombieStorages(DatanodeRegistration nodeReg,
-      BlockReportContext context, Set<String> storageIDsInBlockReport)
-      throws UnregisteredNodeException {
-    namesystem.writeLock();
-    DatanodeDescriptor node = this.getDatanodeManager().getDatanode(nodeReg);
-    if (node != null) {
-      List<DatanodeStorageInfo> zombies =
-          node.removeZombieStorages(storageIDsInBlockReport);
-      if (zombies.isEmpty()) {
-        LOG.debug("processReport 0x{}: no zombie storages found.",
-            Long.toHexString(context.getReportId()));
-      } else {
-        for (DatanodeStorageInfo zombie : zombies) {
-          this.removeZombieReplicas(context, zombie);
-        }
-      }
-    }
-    namesystem.writeUnlock();
-  }
-
   private void removeZombieReplicas(BlockReportContext context,
       DatanodeStorageInfo zombie) {
     LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 968bbbf..5890855 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -65,6 +67,24 @@ public class DatanodeDescriptor extends DatanodeInfo {
   // If node is not decommissioning, do not use this object for anything.
   public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
 
+  private long curBlockReportId = 0;
+
+  private BitSet curBlockReportRpcsSeen = null;
+
+  public int updateBlockReportContext(BlockReportContext context) {
+    if (curBlockReportId != context.getReportId()) {
+      curBlockReportId = context.getReportId();
+      curBlockReportRpcsSeen = new BitSet(context.getTotalRpcs());
+    }
+    curBlockReportRpcsSeen.set(context.getCurRpc());
+    return curBlockReportRpcsSeen.cardinality();
+  }
+
+  public void clearBlockReportContext() {
+    curBlockReportId = 0;
+    curBlockReportRpcsSeen = null;
+  }
+
   /** Block and targets pair */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
@@ -289,8 +309,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   static final private List<DatanodeStorageInfo> EMPTY_STORAGE_INFO_LIST =
       ImmutableList.of();
 
-  List<DatanodeStorageInfo>
-  removeZombieStorages(Set<String> storageIDsInBlockReport) {
+  List<DatanodeStorageInfo> removeZombieStorages() {
     List<DatanodeStorageInfo> zombies = null;
     synchronized (storageMap) {
       Iterator<Map.Entry<String, DatanodeStorageInfo>> iter =
@@ -298,13 +317,18 @@ public class DatanodeDescriptor extends DatanodeInfo {
       while (iter.hasNext()) {
         Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
         DatanodeStorageInfo storageInfo = entry.getValue();
-        if (!storageIDsInBlockReport.contains(storageInfo.getStorageID())) {
+        if (storageInfo.getLastBlockReportId() != curBlockReportId) {
+          LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
+              Long.toHexString(storageInfo.getLastBlockReportId()) +
+              ", but curBlockReportId = 0x" +
+              Long.toHexString(curBlockReportId));
           iter.remove();
           if (zombies == null) {
             zombies = new LinkedList<DatanodeStorageInfo>();
           }
           zombies.add(storageInfo);
         }
+        storageInfo.setLastBlockReportId(0);
       }
     }
     return zombies == null ? EMPTY_STORAGE_INFO_LIST : zombies;
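
The two pieces restored in this file and the next -- a per-node BitSet of RPC indices seen and a per-storage lastBlockReportId stamp -- compose as in this minimal standalone sketch (invented class name; not the Hadoop API): a report counts as complete once every one of its RPC indices has been observed, and only then does zombie pruning run.

----------------------------------------------------------------------
import java.util.BitSet;

class BlockReportTracker {
  private long curReportId;
  private BitSet rpcsSeen;

  /** Returns how many distinct RPCs of the current report have arrived. */
  synchronized int update(long reportId, int totalRpcs, int curRpc) {
    if (reportId != curReportId) {   // a new report id resets the window
      curReportId = reportId;
      rpcsSeen = new BitSet(totalRpcs);
    }
    rpcsSeen.set(curRpc);            // re-delivered RPCs are idempotent
    return rpcsSeen.cardinality();
  }

  public static void main(String[] args) {
    BlockReportTracker t = new BlockReportTracker();
    int total = 3;
    System.out.println(t.update(0xA, total, 0));           // 1
    System.out.println(t.update(0xA, total, 0));           // still 1
    System.out.println(t.update(0xA, total, 2));           // 2
    System.out.println(t.update(0xA, total, 1) >= total);  // true: prune now
  }
}
----------------------------------------------------------------------

Note that the restored removeZombieStorages() also resets every surviving storage's stamp to 0, so a storage must be freshly stamped by the next full report to survive the next pruning pass.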

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index a70b08f..b3e45f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -115,6 +115,9 @@ public class DatanodeStorageInfo {
   private volatile BlockInfoContiguous blockList = null;
   private int numBlocks = 0;
 
+  // The ID of the last full block report which updated this storage.
+  private long lastBlockReportId = 0;
+
   /** The number of block reports received */
   private int blockReportCount = 0;
 
@@ -179,6 +182,14 @@ public class DatanodeStorageInfo {
     this.blockPoolUsed = blockPoolUsed;
   }
 
+  long getLastBlockReportId() {
+    return lastBlockReportId;
+  }
+
+  void setLastBlockReportId(long lastBlockReportId) {
+    this.lastBlockReportId = lastBlockReportId;
+  }
+
   State getState() {
     return this.state;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 672c712..05aca6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -471,35 +471,10 @@ class BPServiceActor implements Runnable {
       } else {
         // Send one block report per message.
         for (int r = 0; r < reports.length; r++) {
-          StorageBlockReport[] singleReport = {reports[r]};
-          DatanodeCommand cmd;
-          if (r != reports.length - 1) {
-            cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(),
-                singleReport, new BlockReportContext(reports.length, r,
-                    reportId));
-          } else {
-            StorageBlockReport[] lastSplitReport =
-                new StorageBlockReport[perVolumeBlockLists.size()];
-            // When block reports are split, the last RPC in the block report
-            // has the information about all storages in the block report.
-            // See HDFS-10301 for more details. To achieve this, the last RPC
-            // has 'n' storage reports, where 'n' is the number of storages in
-            // a DN. The actual block replicas are reported only for the
-            // last/n-th storage.
-            i = 0;
-            for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
-                perVolumeBlockLists.entrySet()) {
-              lastSplitReport[i++] = new StorageBlockReport(
-                  kvPair.getKey(), BlockListAsLongs.STORAGE_REPORT);
-              if (i == r) {
-                lastSplitReport[i] = reports[r];
-                break;
-              }
-            }
-            cmd = bpNamenode.blockReport(
-                bpRegistration, bpos.getBlockPoolId(), lastSplitReport,
-                new BlockReportContext(reports.length, r, reportId));
-          }
+          StorageBlockReport singleReport[] = { reports[r] };
+          DatanodeCommand cmd = bpNamenode.blockReport(
+              bpRegistration, bpos.getBlockPoolId(), singleReport,
+              new BlockReportContext(reports.length, r, reportId));
           numReportsSent++;
           numRPCs++;
           if (cmd != null) {
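
The deleted branch above implemented the protocol its comment describes: the final RPC carried one entry per storage, all but one of them STORAGE_REPORT placeholders, with real replicas only for the storage being reported. A hypothetical standalone reduction (invented types; records require Java 16+):

----------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

public class LastSplitReportModel {
  // Stand-in for StorageBlockReport: placeholder == true models a
  // STORAGE_REPORT entry that names the storage but carries no replicas.
  record Report(String storageId, boolean placeholder) {}

  // Build the final RPC for a DataNode whose storages are listed in order;
  // the real report rides in the last slot (index n - 1).
  static Report[] buildLastRpc(List<String> storageIds, Report realReport) {
    int r = storageIds.size() - 1;
    Report[] last = new Report[storageIds.size()];
    for (int i = 0; i < storageIds.size(); i++) {
      last[i] = (i == r) ? realReport
                         : new Report(storageIds.get(i), true);
    }
    return last;
  }

  public static void main(String[] args) {
    Report real = new Report("s3", false);
    System.out.println(Arrays.toString(
        buildLastRpc(List.of("s1", "s2", "s3"), real)));
    // [Report[storageId=s1, placeholder=true],
    //  Report[storageId=s2, placeholder=true],
    //  Report[storageId=s3, placeholder=false]]
  }
}
----------------------------------------------------------------------

The revert drops that machinery in favor of the plain one-storage-per-RPC loop, leaving report-completeness tracking to the NameNode-side BitSet restored in DatanodeDescriptor.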

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 7ff2bef..582e492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1314,32 +1314,20 @@ class NameNodeRpcServer implements NamenodeProtocols {
     boolean noStaleStorages = false;
     for (int r = 0; r < reports.length; r++) {
       final BlockListAsLongs blocks = reports[r].getBlocks();
-      if (!blocks.isStorageReport()) {
-        //
-        // BlockManager.processReport accumulates information of prior calls
-        // for the same node and storage, so the value returned by the last
-        // call of this loop is the final updated value for noStaleStorage.
-        //
-        final int index = r;
-        noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {
-          @Override
-          public Boolean call()
-              throws IOException {
-            return bm.processReport(nodeReg, reports[index].getStorage(),
-                blocks, context);
-          }
-        });
-        metrics.incrStorageBlockReportOps();
-      }
-    }
-
-    if (nn.getFSImage().isUpgradeFinalized() &&
-        context.getTotalRpcs() == context.getCurRpc() + 1) {
-      Set<String> storageIDsInBlockReport = new HashSet<>();
-      for (StorageBlockReport report : reports) {
-        storageIDsInBlockReport.add(report.getStorage().getStorageID());
-      }
-      bm.removeZombieStorages(nodeReg, context, storageIDsInBlockReport);
+      //
+      // BlockManager.processReport accumulates information of prior calls
+      // for the same node and storage, so the value returned by the last
+      // call of this loop is the final updated value for noStaleStorage.
+      //
+      final int index = r;
+      noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {
+        @Override
+        public Boolean call() throws IOException {
+          return bm.processReport(nodeReg, reports[index].getStorage(),
+              blocks, context, (index == reports.length - 1));
+        }
+      });
+      metrics.incrStorageBlockReportOps();
     }
 
     if (nn.getFSImage().isUpgradeFinalized() &&

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 6e4b747..5f08886 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -678,12 +678,12 @@ public class TestBlockManager {
     reset(node);
     
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        BlockListAsLongs.EMPTY, null);
+        BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
     // send block report again, should NOT be processed
     reset(node);
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        BlockListAsLongs.EMPTY, null);
+        BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
 
     // re-register as if node restarted, should update existing node
@@ -694,7 +694,7 @@ public class TestBlockManager {
     // send block report, should be processed after restart
     reset(node);
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-                     BlockListAsLongs.EMPTY, null);
+                     BlockListAsLongs.EMPTY, null, false);
     // Reinitialize as registration with empty storage list pruned
     // node.storageMap.
     ds = node.getStorageInfos()[0];
@@ -723,7 +723,7 @@ public class TestBlockManager {
     reset(node);
     doReturn(1).when(node).numBlocks();
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        BlockListAsLongs.EMPTY, null);
+        BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
   }
 
@@ -796,7 +796,7 @@ public class TestBlockManager {
     // Make sure it's the first full report
     assertEquals(0, ds.getBlockReportCount());
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        builder.build(), null);
+        builder.build(), null, false);
     assertEquals(1, ds.getBlockReportCount());
 
     // verify the storage info is correct

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 869a2b5..4b97d01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -19,12 +19,6 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import com.google.common.base.Supplier;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -35,17 +29,12 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -260,67 +249,4 @@ public class TestNameNodePrunesMissingStorages {
       }
     }
   }
-
-  @Test(timeout=300000)
-  public void testInterleavedFullBlockReports() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
-        36000000L);
-    int numStoragesPerDatanode = 6;
-    final MiniDFSCluster cluster = new MiniDFSCluster
-        .Builder(conf).numDataNodes(1)
-        .storagesPerDatanode(numStoragesPerDatanode)
-        .build();
-    try {
-      LOG.info("waiting for cluster to become active...");
-      cluster.waitActive();
-      // Get the datanode registration and the block reports
-      DataNode dn = cluster.getDataNodes().get(0);
-      final String blockPoolId = cluster.getNamesystem().getBlockPoolId();
-      LOG.info("Block pool id: " + blockPoolId);
-      final DatanodeRegistration dnR = dn.getDNRegistrationForBP(blockPoolId);
-      Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
-          dn.getFSDataset().getBlockReports(blockPoolId);
-      final StorageBlockReport[] reports =
-          new StorageBlockReport[perVolumeBlockLists.size()];
-      int reportIndex = 0;
-      for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
-          perVolumeBlockLists.entrySet()) {
-        DatanodeStorage dnStorage = kvPair.getKey();
-        BlockListAsLongs blockList = kvPair.getValue();
-        reports[reportIndex++] =
-            new StorageBlockReport(dnStorage, blockList);
-      }
-      // Get the list of storage ids associated with the datanode
-      // before the test
-      BlockManager bm =
-          cluster.getNameNode().getNamesystem().getBlockManager();
-      final DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().
-          getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
-      DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
-      // Send the full block report concurrently using
-      // numThreads=numStoragesPerDatanode
-      ExecutorService executorService = Executors.
-          newFixedThreadPool(numStoragesPerDatanode);
-      List<Future<DatanodeCommand>> futureList =
-          new ArrayList<>(numStoragesPerDatanode);
-      for (int i = 0; i < numStoragesPerDatanode; i++) {
-        futureList.add(executorService.submit(new Callable<DatanodeCommand>() {
-          @Override
-          public DatanodeCommand call() throws IOException {
-            return cluster.getNameNodeRpc().blockReport(dnR, blockPoolId,
-                reports, new BlockReportContext(1, 0, System.nanoTime()));
-          }
-        }));
-      }
-      for (Future<DatanodeCommand> future: futureList) {
-        future.get();
-      }
-      executorService.shutdown();
-      // Verify that the storages match before and after the test
-      Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
-    } finally {
-      cluster.shutdown();
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
index beaba8a..aadd9b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
@@ -41,7 +41,6 @@ import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 import static org.mockito.Matchers.*;
 import static org.mockito.Mockito.times;
@@ -89,34 +88,6 @@ public class TestDnRespectsBlockReportSplitThreshold {
         blockCount * BLOCK_SIZE, BLOCK_SIZE, REPL_FACTOR, seed);
   }
 
-  private void verifyCapturedArgumentsSplit(
-      ArgumentCaptor<StorageBlockReport[]> captor,
-      int expectedReportsPerCall,
-      int expectedTotalBlockCount) {
-    List<StorageBlockReport[]> listOfReports = captor.getAllValues();
-    int numBlocksReported = 0;
-    int storageIndex = 0;
-    int listOfReportsSize = listOfReports.size();
-    for (StorageBlockReport[] reports : listOfReports) {
-      if (storageIndex < (listOfReportsSize - 1)) {
-        assertThat(reports.length, is(expectedReportsPerCall));
-      } else {
-        assertThat(reports.length, is(listOfReportsSize));
-      }
-      for (StorageBlockReport report : reports) {
-        BlockListAsLongs blockList = report.getBlocks();
-        if (!blockList.isStorageReport()) {
-          numBlocksReported += blockList.getNumberOfBlocks();
-        } else {
-          assertEquals(blockList.getNumberOfBlocks(), -1);
-        }
-      }
-      storageIndex++;
-    }
-
-    assert(numBlocksReported >= expectedTotalBlockCount);
-  }
-
   private void verifyCapturedArguments(
       ArgumentCaptor<StorageBlockReport[]> captor,
       int expectedReportsPerCall,
@@ -165,7 +136,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
         anyString(),
         captor.capture(),  Mockito.<BlockReportContext>anyObject());
 
-    verifyCapturedArgumentsSplit(captor, 1, BLOCKS_IN_FILE);
+    verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
   }
 
   /**
@@ -229,7 +200,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
         anyString(),
         captor.capture(), Mockito.<BlockReportContext>anyObject());
 
-    verifyCapturedArgumentsSplit(captor, 1, BLOCKS_IN_FILE);
+    verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3282b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
index 5918eef..b150b0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
@@ -36,30 +35,13 @@ public class TestNNHandlesBlockReportPerStorage extends BlockReportTestBase {
   @Override
   protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
       StorageBlockReport[] reports) throws IOException {
-    for (int r = 0; r < reports.length; r++) {
-      LOG.info("Sending block report for storage " +
-          reports[r].getStorage().getStorageID());
-      StorageBlockReport[] singletonReport = {reports[r]};
-      if (r != reports.length - 1) {
-        cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport,
-            new BlockReportContext(reports.length, r, System.nanoTime()));
-      } else {
-        StorageBlockReport[] lastSplitReport =
-            new StorageBlockReport[reports.length];
-        // When block reports are split, send a dummy storage report for all
-        // other storages in the blockreport along with the last storage report
-        for (int i = 0; i <= r; i++) {
-          if (i == r) {
-            lastSplitReport[i] = reports[r];
-            break;
-          }
-          lastSplitReport[i] =
-              new StorageBlockReport(reports[i].getStorage(),
-                  BlockListAsLongs.STORAGE_REPORT);
-        }
-        cluster.getNameNodeRpc().blockReport(dnR, poolId, lastSplitReport,
-            new BlockReportContext(reports.length, r, System.nanoTime()));
-      }
+    int i = 0;
+    for (StorageBlockReport report : reports) {
+      LOG.info("Sending block report for storage " + report.getStorage().getStorageID());
+      StorageBlockReport[] singletonReport = { report };
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport,
+          new BlockReportContext(reports.length, i, System.nanoTime()));
+      i++;
     }
   }
 }

