From: arp@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 16 Jun 2015 18:42:02 -0000
Subject: [43/50] [abbrv] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 6b8388e..d081a6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -22,7 +22,7 @@ import java.util.List;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -133,7 +133,7 @@ public class FileDiffList extends
     Block dontRemoveBlock = null;
     if (lastBlock != null && lastBlock.getBlockUCState().equals(
         HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
-      dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+      dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock)
           .getTruncateBlock();
     }
     // Collect the remaining blocks of the file, ignoring truncate block

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 50b85c0..d06b024 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -109,7 +109,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -1612,9 +1612,9 @@ public class DFSTestUtil {
     BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
     assertTrue("Block " + blk + " should be under construction, " +
         "got: " + storedBlock,
-        storedBlock instanceof BlockInfoContiguousUnderConstruction);
-    BlockInfoContiguousUnderConstruction ucBlock =
-      (BlockInfoContiguousUnderConstruction)storedBlock;
+        storedBlock instanceof BlockInfoUnderConstruction);
+    BlockInfoUnderConstruction ucBlock =
+      (BlockInfoUnderConstruction)storedBlock;
     // We expect that the replica with the most recent heart beat will be
     // the one to be in charge of the synchronization / recovery protocol.
     final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index a7ba293..630cd1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.util.Time;
 import org.junit.Test;
 /**
@@ -40,7 +39,8 @@ public class TestBlockInfoUnderConstruction {
     DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
     dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
-    BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
+    BlockInfoUnderConstruction blockInfo =
+        new BlockInfoUnderConstructionContiguous(
         new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
         (short) 3,
         BlockUCState.UNDER_CONSTRUCTION,
@@ -51,7 +51,7 @@ public class TestBlockInfoUnderConstruction {
     DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
     DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
     blockInfo.initializeBlockRecovery(1);
-    BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
+    BlockInfoUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
     assertEquals(blockInfoRecovery[0], blockInfo);
     // Recovery attempt #2.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index fd03745..5a82b15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -726,7 +726,7 @@ public class TestBlockManager {
     // verify the storage info is correct
     assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
         (ds) >= 0);
-    assertTrue(((BlockInfoContiguousUnderConstruction) bm.
+    assertTrue(((BlockInfoUnderConstruction) bm.
         getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
     assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
         .findStorageInfo(ds) >= 0);
@@ -747,8 +747,8 @@ public class TestBlockManager {
   private BlockInfo addUcBlockToBM(long blkId) {
     Block block = new Block(blkId);
-    BlockInfoContiguousUnderConstruction blockInfo =
-        new BlockInfoContiguousUnderConstruction(block, (short) 3);
+    BlockInfoUnderConstruction blockInfo =
+        new BlockInfoUnderConstructionContiguous(block, (short) 3);
     BlockCollection bc = Mockito.mock(BlockCollection.class);
     Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
     bm.blocksMap.addBlockCollection(blockInfo, bc);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index 6fc30ba..e48e9e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.util.Time;
 import org.junit.Test;
 /**
@@ -173,7 +172,8 @@ public class TestHeartbeatHandling {
           dd1.getStorageInfos()[0],
           dd2.getStorageInfos()[0],
           dd3.getStorageInfos()[0]};
-      BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
+      BlockInfoUnderConstruction blockInfo =
+          new BlockInfoUnderConstructionContiguous(
           new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
           BlockUCState.UNDER_RECOVERY, storages);
       dd1.addBlockToBeRecovered(blockInfo);
@@ -195,7 +195,7 @@ public class TestHeartbeatHandling {
       // More than the default stale interval of 30 seconds.
       DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
       DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
-      blockInfo = new BlockInfoContiguousUnderConstruction(
+      blockInfo = new BlockInfoUnderConstructionContiguous(
           new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
           BlockUCState.UNDER_RECOVERY, storages);
       dd1.addBlockToBeRecovered(blockInfo);
@@ -216,7 +216,7 @@ public class TestHeartbeatHandling {
       // More than the default stale interval of 30 seconds.
       DFSTestUtil.resetLastUpdatesWithOffset(dd2, - 40 * 1000);
       DFSTestUtil.resetLastUpdatesWithOffset(dd3, - 80 * 1000);
-      blockInfo = new BlockInfoContiguousUnderConstruction(
+      blockInfo = new BlockInfoUnderConstructionContiguous(
          new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
          BlockUCState.UNDER_RECOVERY, storages);
       dd1.addBlockToBeRecovered(blockInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index fccaf3c..6e98538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1176,7 +1176,8 @@ public class TestReplicationPolicy {
     // block under construction, the BlockManager will realize the expected
     // replication has been achieved and remove it from the under-replicated
     // queue.
-    BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
+    BlockInfoUnderConstruction info =
+        new BlockInfoUnderConstructionContiguous(block1, (short) 1);
     BlockCollection bc = mock(BlockCollection.class);
     when(bc.getPreferredBlockReplication()).thenReturn((short)1);
     bm.addBlockCollection(info, bc);
@@ -1232,7 +1233,7 @@ public class TestReplicationPolicy {
     DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo(
         dataNodes[0], new DatanodeStorage("s1"))};
-    final BlockInfoContiguousUnderConstruction ucBlock =
+    final BlockInfoUnderConstruction ucBlock =
         info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
             storageAry);
     DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index f372bec..872ff9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.junit.AfterClass;
@@ -170,7 +170,7 @@ public class TestBlockUnderConstruction {
       final List blocks = lb.getLocatedBlocks();
       assertEquals(i, blocks.size());
       final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
-      assertTrue(b instanceof BlockInfoContiguousUnderConstruction);
+      assertTrue(b instanceof BlockInfoUnderConstruction);
       if (++i < NUM_BLOCKS) {
         // write one more block

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index bea7241..b6cb522 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -24,7 +24,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
@@ -68,8 +69,10 @@ public class TestCommitBlockSynchronization {
     namesystem.dir.getINodeMap().put(file);
     FSNamesystem namesystemSpy = spy(namesystem);
-    BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
-        block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
+    BlockInfoUnderConstruction blockInfo =
+        new BlockInfoUnderConstructionContiguous(
+        block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
+        targets);
     blockInfo.setBlockCollection(file);
     blockInfo.setGenerationStamp(genStamp);
     blockInfo.initializeBlockRecovery(genStamp);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 222f22b..df920e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1019,7 +1019,7 @@ public class TestFileTruncate {
         is(fsn.getBlockIdManager().getGenerationStampV2()));
     assertThat(file.getLastBlock().getBlockUCState(),
         is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
-    long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock())
+    long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
         .getBlockRecoveryId();
     assertThat(blockRecoveryId, is(initialGenStamp + 1));
     fsn.getEditLog().logTruncate(
@@ -1052,7 +1052,7 @@ public class TestFileTruncate {
         is(fsn.getBlockIdManager().getGenerationStampV2()));
     assertThat(file.getLastBlock().getBlockUCState(),
         is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
-    long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock())
+    long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
         .getBlockRecoveryId();
     assertThat(blockRecoveryId, is(initialGenStamp + 1));
     fsn.getEditLog().logTruncate(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index d202fb7..14d9a1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -72,7 +72,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
@@ -752,8 +752,8 @@ public class TestRetryCacheWithHA {
     boolean checkNamenodeBeforeReturn() throws Exception {
       INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
           .getINode4Write(file).asFile();
-      BlockInfoContiguousUnderConstruction blkUC =
-          (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1];
+      BlockInfoUnderConstruction blkUC =
+          (BlockInfoUnderConstruction) (fileNode.getBlocks())[1];
       int datanodeNum = blkUC.getExpectedStorageLocations().length;
       for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
         Thread.sleep(1000);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index a1abd08..824f45b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
@@ -177,7 +177,7 @@ public class SnapshotTestHelper {
    * Specific information for different types of INode:
    * {@link INodeDirectory}:childrenSize
    * {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()}
-   * and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information.
+   * and {@link BlockInfoUnderConstruction#toString()} for detailed information.
   * {@link FileWithSnapshot}: next link
   *
   * @see INode#dumpTreeRecursively()
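
The rename pattern throughout this patch is consistent: call sites that only need under-construction behavior (expected storage locations, recovery id, truncate block) now reference the abstract BlockInfoUnderConstruction, while object creation goes through the concrete BlockInfoUnderConstructionContiguous, presumably so that other layouts can later extend the same abstract type. Below is a minimal, hypothetical sketch of that shape, reduced to the members the touched files call; placeholder types stand in for the real HDFS classes, and this is not the actual Hadoop source.

// Placeholder stand-ins for the real org.apache.hadoop.hdfs types.
class Block {}
class DatanodeStorageInfo {}
abstract class BlockInfo extends Block {}

// Abstract view used by callers such as FileDiffList, DFSTestUtil and the tests above.
abstract class BlockInfoUnderConstruction extends BlockInfo {
  private long blockRecoveryId;

  // Expected replica locations (used by DFSTestUtil and TestRetryCacheWithHA).
  abstract DatanodeStorageInfo[] getExpectedStorageLocations();

  // Count of expected locations (used by TestBlockManager).
  int getNumExpectedLocations() {
    return getExpectedStorageLocations().length;
  }

  // Recovery bookkeeping (used by TestHeartbeatHandling and TestFileTruncate).
  void initializeBlockRecovery(long recoveryId) {
    this.blockRecoveryId = recoveryId;
  }

  long getBlockRecoveryId() {
    return blockRecoveryId;
  }

  // Truncate bookkeeping (used by FileDiffList).
  abstract Block getTruncateBlock();
}

// Concrete subclass for contiguous blocks, constructed by the tests with
// (block, replication) or (block, replication, state, targets).
class BlockInfoUnderConstructionContiguous extends BlockInfoUnderConstruction {
  private final Block block;
  private final short replication;
  private final DatanodeStorageInfo[] targets;
  private Block truncateBlock;

  BlockInfoUnderConstructionContiguous(Block block, short replication,
      DatanodeStorageInfo... targets) {
    this.block = block;
    this.replication = replication;
    this.targets = targets;
  }

  @Override
  DatanodeStorageInfo[] getExpectedStorageLocations() {
    return targets;
  }

  @Override
  Block getTruncateBlock() {
    return truncateBlock;
  }
}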