Subject: svn commit: r1440222 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/docs/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/ha...
Date: Tue, 29 Jan 2013 23:54:01 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org
Message-Id: <20130129235402.3AF662388A56@eris.apache.org>

Author: szetszwo
Date: Tue Jan 29 23:53:59 2013
New Revision: 1440222

URL: http://svn.apache.org/viewvc?rev=1440222&view=rev
Log:
Merge r1438306 through r1440221 from trunk.
Removed:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/forrest.properties
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1438306-1440221

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jan 29 23:53:59 2013
@@ -497,6 +497,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4403. DFSClient can infer checksum type when not provided by reading
     first byte (todd)
 
+    HDFS-4259. Improve pipeline DN replacement failure message (harsh)
+
   OPTIMIZATIONS
 
     HDFS-3429. DataNode reads checksums even if client does not need them (todd)
 
@@ -728,6 +730,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4359. Slow RPC responses from NN can prevent metrics collection on
     DNs. (liang xie via atm)
 
+    HDFS-4444. Add space between total transaction time and number of
+    transactions in FSEditLog#printStatistics. (Stephen Chu via suresh)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
 
@@ -1377,6 +1382,9 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly.
     (tucu)
 
     HDFS-3972. Trash emptier fails in secure HA cluster. (todd via eli)
+
+    HDFS-4443. Remove a trailing '`' character from the HTML code generated by
+    NamenodeJspHelper.generateNodeData(..). (Christian Rohling via szetszwo)
 
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
@@ -2225,6 +2233,8 @@ Release 0.23.7 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-4288. NN accepts incremental BR as IBR in safemode (daryn via kihwal)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1438306-1440221

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Jan 29 23:53:59 2013
@@ -786,13 +786,18 @@ public class DFSOutputStream extends FSO
   private int findNewDatanode(final DatanodeInfo[] original
       ) throws IOException {
     if (nodes.length != original.length + 1) {
-      throw new IOException("Failed to add a datanode. "
-          + "User may turn off this feature by setting "
-          + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
-          + " in configuration, where the current policy is "
-          + dfsClient.dtpReplaceDatanodeOnFailure
-          + ". (Nodes: current=" + Arrays.asList(nodes)
-          + ", original=" + Arrays.asList(original) + ")");
+      throw new IOException(
+          new StringBuilder()
+          .append("Failed to replace a bad datanode on the existing pipeline ")
+          .append("due to no more good datanodes being available to try. ")
+          .append("(Nodes: current=").append(Arrays.asList(nodes))
+          .append(", original=").append(Arrays.asList(original)).append("). ")
+          .append("The current failed datanode replacement policy is ")
+          .append(dfsClient.dtpReplaceDatanodeOnFailure).append(", and ")
+          .append("a client may configure this via '")
+          .append(DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY)
+          .append("' in its configuration.")
+          .toString());
     }
     for(int i = 0; i < nodes.length; i++) {
       int j = 0;
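The reworded exception points writers at the client-side replace-datanode-on-failure setting. Purely as an illustration (this snippet is not part of the commit, and the "NEVER" value is an assumption about the policy names rather than something shown in this diff), a client could relax the policy through its Configuration using the same DFSConfigKeys constant named in the message:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class ReplacePolicyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical example: never try to replace a failed datanode in the
        // write pipeline for streams opened by this client.
        conf.set(
            DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
            "NEVER");
        FileSystem fs = FileSystem.get(conf);
        fs.create(new Path("/tmp/replace-policy-example")).close();
      }
    }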
") + .append("The current failed datanode replacement policy is ") + .append(dfsClient.dtpReplaceDatanodeOnFailure).append(", and ") + .append("a client may configure this via '") + .append(DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY) + .append("' in its configuration.") + .toString()); } for(int i = 0; i < nodes.length; i++) { int j = 0; Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1440222&r1=1440221&r2=1440222&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Tue Jan 29 23:53:59 2013 @@ -88,11 +88,7 @@ public class BlockInfo extends Block imp DatanodeDescriptor getDatanode(int index) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; - DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3]; - assert node == null || - DatanodeDescriptor.class.getName().equals(node.getClass().getName()) : - "DatanodeDescriptor is expected at " + index*3; - return node; + return (DatanodeDescriptor)triplets[index*3]; } private BlockInfo getPrevious(int index) { Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1440222&r1=1440221&r2=1440222&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Jan 29 23:53:59 2013 @@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.com import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; @@ -1576,7 +1577,10 @@ public class BlockManager { } // Log the block report processing stats from Namenode perspective - NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime)); + final NameNodeMetrics metrics = NameNode.getNameNodeMetrics(); + if (metrics != null) { + metrics.addBlockReport((int) (endTime - startTime)); + } blockLog.info("BLOCK* processReport: from " + nodeID + ", blocks: " + newReport.getNumberOfBlocks() + ", processing time: " + (endTime - startTime) + " msecs"); Modified: 
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Tue Jan 29 23:53:59 2013
@@ -547,6 +547,7 @@ public class DatanodeDescriptor extends
   @Override
   public void updateRegInfo(DatanodeID nodeReg) {
     super.updateRegInfo(nodeReg);
+    firstBlockReport = true; // must re-process IBR after re-registration
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Tue Jan 29 23:53:59 2013
@@ -419,7 +419,7 @@ public class DatanodeManager {
   }
 
   /** Add a datanode. */
-  private void addDatanode(final DatanodeDescriptor node) {
+  void addDatanode(final DatanodeDescriptor node) {
     // To keep host2DatanodeMap consistent with datanodeMap,
     // remove from host2DatanodeMap the datanodeDescriptor removed
     // from datanodeMap before adding node to host2DatanodeMap.
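Both small changes above support the safemode block-report handling from HDFS-4288 that TestBlockManager exercises below: resetting firstBlockReport on re-registration lets a restarted datanode's next full report be processed again, and widening addDatanode to package-private lets the test swap a spied DatanodeDescriptor into the DatanodeManager. The gate itself lives in BlockManager.processReport and is not part of this diff; the following is only a self-contained toy model of the behaviour the tests assert, with every name and detail invented for illustration:

    // Conceptual model only -- not the real BlockManager/DatanodeDescriptor code.
    public class SafeModeReportGateSketch {
      static class Node {
        boolean firstBlockReport = true;            // reset on (re-)registration
        void receivedBlockReport() { firstBlockReport = false; }
      }

      static boolean inStartupSafeMode = true;      // pretend the NN is in safemode
      static int processed = 0;

      static void processReport(Node node) {
        if (inStartupSafeMode && !node.firstBlockReport) {
          return;                                   // later reports are not re-processed
        }
        processed++;                                // "process" the report
        node.receivedBlockReport();
      }

      public static void main(String[] args) {
        Node node = new Node();
        processReport(node);           // first report: processed
        processReport(node);           // second report in safemode: skipped
        node.firstBlockReport = true;  // re-registration resets the flag
        processReport(node);           // processed again after the "restart"
        System.out.println(processed); // prints 2
      }
    }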
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Jan 29 23:53:59 2013
@@ -646,7 +646,7 @@ public class FSEditLog implements LogsPu
     buf.append(numTransactions);
     buf.append(" Total time for transactions(ms): ");
     buf.append(totalTimeTransactions);
-    buf.append("Number of transactions batched in Syncs: ");
+    buf.append(" Number of transactions batched in Syncs: ");
     buf.append(numTransactionsBatchedInSync);
     buf.append(" Number of syncs: ");
     buf.append(editLogStream.getNumSync());
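The one-character HDFS-4444 fix is easiest to see from the string it produces. A tiny standalone illustration (the numeric values are made up; only the two literals around the change come from the hunk above):

    public class PrintStatisticsSpacing {
      public static void main(String[] args) {
        long totalTimeTransactions = 7;        // made-up sample value
        long numTransactionsBatchedInSync = 3; // made-up sample value

        String before = " Total time for transactions(ms): " + totalTimeTransactions
            + "Number of transactions batched in Syncs: " + numTransactionsBatchedInSync;
        String after = " Total time for transactions(ms): " + totalTimeTransactions
            + " Number of transactions batched in Syncs: " + numTransactionsBatchedInSync;

        // Without the leading space the two statistics run together:
        //   ... Total time for transactions(ms): 7Number of transactions batched in Syncs: 3
        // With the fix they read as separate fields:
        //   ... Total time for transactions(ms): 7 Number of transactions batched in Syncs: 3
        System.out.println(before);
        System.out.println(after);
      }
    }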
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Tue Jan 29 23:53:59 2013
@@ -592,7 +592,7 @@ class NamenodeJspHelper {
         + ""
         + ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
             100)
-        + ""
+        + ""
         + percentRemaining
         + ""
         + d.numBlocks()+"\n"

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1438306-1440221

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1438306-1440221

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1438306-1440221

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1438306-1440221

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1438306-1440221

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1440222&r1=1440221&r2=1440222&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Tue Jan 29 23:53:59 2013
@@ -34,13 +34,16 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
@@ -485,4 +488,70 @@ public class TestBlockManager {
         new NumberReplicas(),
         UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
   }
+
+  @Test
+  public void testSafeModeIBR() throws Exception {
+    DatanodeDescriptor node = spy(nodes.get(0));
+    node.setStorageID("dummy-storage");
+    node.isAlive = true;
+
+    DatanodeRegistration nodeReg =
+        new DatanodeRegistration(node, null, null, "");
+
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node); // swap in spy
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertTrue(node.isFirstBlockReport());
+    // send block report, should be processed
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+    // send block report again, should NOT be processed
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node, never()).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+
+    // re-register as if node restarted, should update existing node
+    bm.getDatanodeManager().removeDatanode(node);
+    reset(node);
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    verify(node).updateRegInfo(nodeReg);
+    assertTrue(node.isFirstBlockReport()); // ready for report again
+    // send block report, should be processed after restart
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+  }
+
+  @Test
+  public void testSafeModeIBRAfterIncremental() throws Exception {
+    DatanodeDescriptor node = spy(nodes.get(0));
+    node.setStorageID("dummy-storage");
+    node.isAlive = true;
+
+    DatanodeRegistration nodeReg =
+        new DatanodeRegistration(node, null, null, "");
+
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node); // swap in spy
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertTrue(node.isFirstBlockReport());
+    // send block report while pretending to already have blocks
+    reset(node);
+    doReturn(1).when(node).numBlocks();
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+  }
 }