From: umamahesh@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-11068: [SPS]: Provide unique trackID to track the block movement sends to coordinator. Contributed by Rakesh R
Date: Fri, 11 Nov 2016 09:18:39 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 d7fcee1d6 -> 5686f564d


HDFS-11068: [SPS]: Provide unique trackID to track the block movement sends to coordinator.
Contributed by Rakesh R

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5686f564
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5686f564
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5686f564

Branch: refs/heads/HDFS-10285
Commit: 5686f564db4adbd7a28d5a0ad62d9df3ae962988
Parents: d7fcee1
Author: Uma Maheswara Rao G
Authored: Fri Nov 11 01:17:50 2016 -0800
Committer: Uma Maheswara Rao G
Committed: Fri Nov 11 01:17:50 2016 -0800

----------------------------------------------------------------------
 .../blockmanagement/DatanodeDescriptor.java     | 31 +++++--
 .../server/blockmanagement/DatanodeManager.java | 16 ++--
 .../BlockStorageMovementInfosBatch.java         | 61 +++++++++++++
 .../server/namenode/StoragePolicySatisfier.java |  5 +-
 .../protocol/BlocksStorageMovementResult.java   |  6 ++
 .../namenode/TestStoragePolicySatisfier.java    | 95 +++++++++++++++-----
 6 files changed, 174 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686f564/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 69efc19..3f4986b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementInfosBatch;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
@@ -205,8 +205,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private final LightWeightHashSet<Block> invalidateBlocks =
       new LightWeightHashSet<>();
 
-  /** A queue of blocks for moving its storage placements by this datanode. */
-  private final Queue<List<BlockMovingInfo>> storageMovementBlocks =
+  /**
+   * A queue of blocks corresponding to trackID for moving its storage
+   * placements by this datanode.
+   */
+  private final Queue<BlockStorageMovementInfosBatch> storageMovementBlocks =
       new LinkedList<>();
 
   /* Variables for maintaining number of blocks scheduled to be written to
@@ -933,18 +936,30 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Add the block infos which needs to move its storage locations.
    *
+   * @param trackID
+   *          - unique identifier which will be used for tracking the given set
+   *          of blocks movement completion.
    * @param storageMismatchedBlocks
    *          - storage mismatched block infos
    */
-  public void addBlocksToMoveStorage(
+  public void addBlocksToMoveStorage(long trackID,
       List<BlockMovingInfo> storageMismatchedBlocks) {
-    storageMovementBlocks.offer(storageMismatchedBlocks);
+    synchronized (storageMovementBlocks) {
+      storageMovementBlocks.offer(
+          new BlockStorageMovementInfosBatch(trackID, storageMismatchedBlocks));
+    }
   }
 
   /**
-   * @return block infos which needs to move its storage locations.
+   * @return block infos which needs to move its storage locations. This returns
+   *         list of blocks under one trackId.
    */
-  public List<BlockMovingInfo> getBlocksToMoveStorages() {
-    return storageMovementBlocks.poll();
+  public BlockStorageMovementInfosBatch getBlocksToMoveStorages() {
+    synchronized (storageMovementBlocks) {
+      // TODO: Presently returning the list of blocks under one trackId.
+      // Need to limit the list of items into small batches with in trackId
+      // itself if blocks are many(For example: a file contains many blocks).
+      return storageMovementBlocks.poll();
+    }
   }
 }
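The heart of the change is visible above: the datanode descriptor no longer queues bare lists of BlockMovingInfo; it queues batches that each carry their own trackID, and both queue operations are synchronized because the queue is filled from the StoragePolicySatisfier path and drained during heartbeat processing. Below is a minimal, self-contained sketch of that pattern; MovingInfo and Batch are hypothetical stand-ins for BlockMovingInfo and BlockStorageMovementInfosBatch, not the Hadoop classes.

----------------------------------------------------------------------
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class BatchQueueSketch {

  /** Hypothetical stand-in for BlockMovingInfo. */
  static class MovingInfo {
    final long blockId;
    MovingInfo(long blockId) {
      this.blockId = blockId;
    }
  }

  /** Hypothetical stand-in for BlockStorageMovementInfosBatch. */
  static class Batch {
    final long trackID;
    final List<MovingInfo> moves;
    Batch(long trackID, List<MovingInfo> moves) {
      this.trackID = trackID;
      this.moves = moves;
    }
  }

  // One queue entry per trackID, guarded by synchronized blocks as in
  // DatanodeDescriptor above; LinkedList alone is not thread-safe.
  private final Queue<Batch> storageMovementBlocks = new LinkedList<>();

  /** Producer side: enqueue a whole batch under a single trackID. */
  public void addBlocksToMoveStorage(long trackID, List<MovingInfo> moves) {
    synchronized (storageMovementBlocks) {
      storageMovementBlocks.offer(new Batch(trackID, moves));
    }
  }

  /** Consumer (heartbeat) side: drain one batch, or null if none pending. */
  public Batch getBlocksToMoveStorages() {
    synchronized (storageMovementBlocks) {
      return storageMovementBlocks.poll();
    }
  }

  public static void main(String[] args) {
    BatchQueueSketch queue = new BatchQueueSketch();
    queue.addBlocksToMoveStorage(16386L,
        Arrays.asList(new MovingInfo(1001L), new MovingInfo(1002L)));
    Batch batch = queue.getBlocksToMoveStorages();
    System.out.println("trackID=" + batch.trackID
        + ", blocks=" + batch.moves.size());
  }
}
----------------------------------------------------------------------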
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686f564/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 23efcad..b98ad1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
+import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementInfosBatch;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -1628,16 +1628,14 @@ public class DatanodeManager {
     }
 
     // check pending block storage movement tasks
-    List<BlockMovingInfo> pendingBlockMovementList = nodeinfo
+    BlockStorageMovementInfosBatch blkStorageMovementInfosBatch = nodeinfo
         .getBlocksToMoveStorages();
-    if (pendingBlockMovementList != null) {
-      // TODO: trackID is used to track the block movement sends to coordinator
-      // datanode. Need to implement tracking logic. Temporarily, using a
-      // constant value -1.
-      long trackID = -1;
+
+    if (blkStorageMovementInfosBatch != null) {
       cmds.add(new BlockStorageMovementCommand(
-          DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT, trackID, blockPoolId,
-          pendingBlockMovementList));
+          DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT,
+          blkStorageMovementInfosBatch.getTrackID(), blockPoolId,
+          blkStorageMovementInfosBatch.getBlockMovingInfo()));
     }
 
     if (!cmds.isEmpty()) {
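With each batch carrying its own identifier, the heartbeat path above can drop the placeholder trackID of -1 that the removed TODO used. The point of a unique trackID per command is that movement results coming back from coordinator datanodes are keyed by it. A toy sketch of that bookkeeping follows, using hypothetical names rather than the Hadoop monitor API:

----------------------------------------------------------------------
import java.util.Map;
import java.util.TreeMap;

public class TrackIdBookkeepingSketch {

  enum Status { IN_PROGRESS, SUCCESS, FAILURE }

  // Pending movement attempts keyed by trackID, roughly how a results
  // monitor can hold them once every batch has a unique key.
  // TreeMap only so the printed order below is deterministic.
  private final Map<Long, Status> attemptedItems = new TreeMap<>();

  void commandSent(long trackID) {
    attemptedItems.put(trackID, Status.IN_PROGRESS);
  }

  void resultReceived(long trackID, Status status) {
    // A unique trackID makes this update unambiguous; with the old
    // constant -1, results from different batches would collide on one key.
    attemptedItems.put(trackID, status);
  }

  public static void main(String[] args) {
    TrackIdBookkeepingSketch monitor = new TrackIdBookkeepingSketch();
    monitor.commandSent(16386L); // e.g. inode id of file A
    monitor.commandSent(16389L); // e.g. inode id of file B
    monitor.resultReceived(16386L, Status.SUCCESS);
    System.out.println(monitor.attemptedItems);
    // prints: {16386=SUCCESS, 16389=IN_PROGRESS}
  }
}
----------------------------------------------------------------------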
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686f564/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java
new file mode 100644
index 0000000..a790c13
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.List;
+
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+
+/**
+ * This class represents a batch of blocks under one trackId which needs to
+ * move its storage locations to satisfy the storage policy.
+ */
+public class BlockStorageMovementInfosBatch {
+  private long trackID;
+  private List<BlockMovingInfo> blockMovingInfos;
+
+  /**
+   * Constructor to create the block storage movement infos batch.
+   *
+   * @param trackID
+   *          - unique identifier which will be used for tracking the given set
+   *          of blocks movement.
+   * @param blockMovingInfos
+   *          - list of block to storage infos.
+   */
+  public BlockStorageMovementInfosBatch(long trackID,
+      List<BlockMovingInfo> blockMovingInfos) {
+    this.trackID = trackID;
+    this.blockMovingInfos = blockMovingInfos;
+  }
+
+  public long getTrackID() {
+    return trackID;
+  }
+
+  public List<BlockMovingInfo> getBlockMovingInfo() {
+    return blockMovingInfos;
+  }
+
+  @Override
+  public String toString() {
+    return new StringBuilder().append("BlockStorageMovementInfosBatch(\n  ")
+        .append("TrackID: ").append(trackID).append(" BlockMovingInfos: ")
+        .append(blockMovingInfos).append(")").toString();
+  }
+}

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686f564/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 6fa9302..4967a89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -217,7 +217,10 @@ public class StoragePolicySatisfier implements Runnable {
       // chances, then we can just retry limited number of times and exit.
       return;
     }
-    coordinatorNode.addBlocksToMoveStorage(blockMovingInfos);
+
+    // 'BlockCollectionId' is used as the tracking ID. All the blocks under this
+    // blockCollectionID will be added to this datanode.
+    coordinatorNode.addBlocksToMoveStorage(blockCollectionID, blockMovingInfos);
   }
 
   /**

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686f564/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
index 1afba34..713b83b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
@@ -61,4 +61,10 @@
     return status;
   }
 
+  @Override
+  public String toString() {
+    return new StringBuilder().append("BlocksStorageMovementResult(\n  ")
+        .append("track id: ").append(trackId).append(" status: ")
+        .append(status).append(")").toString();
+  }
 }
\ No newline at end of file
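Taken together with the StoragePolicySatisfier hunk above, the identifier flow is: the file's block-collection (inode) id becomes the trackID of the batch, and the BlocksStorageMovementResult sent back by the coordinator echoes that same id so the NameNode can match results to files. A condensed, hypothetical round-trip illustration (simplified stand-in types, not the Hadoop classes):

----------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

public class TrackIdRoundTripSketch {

  /** Simplified stand-in for BlockStorageMovementInfosBatch. */
  static class InfosBatch {
    final long trackID;
    final List<String> blockMoves; // e.g. "blk_1001: DISK -> SSD"
    InfosBatch(long trackID, List<String> blockMoves) {
      this.trackID = trackID;
      this.blockMoves = blockMoves;
    }
  }

  /** Simplified stand-in for BlocksStorageMovementResult. */
  static class MovementResult {
    final long trackId;
    final String status;
    MovementResult(long trackId, String status) {
      this.trackId = trackId;
      this.status = status;
    }
    @Override
    public String toString() {
      return "MovementResult(track id: " + trackId + " status: " + status + ")";
    }
  }

  public static void main(String[] args) {
    // NameNode side: the file's inode id doubles as the trackID.
    long blockCollectionId = 16386L;
    InfosBatch batch = new InfosBatch(blockCollectionId,
        Arrays.asList("blk_1001: DISK -> SSD", "blk_1002: DISK -> SSD"));

    // Coordinator datanode side: the result carries the same trackID,
    // letting the NameNode tie it back to the originating file.
    MovementResult result = new MovementResult(batch.trackID, "SUCCESS");
    System.out.println(result);
  }
}
----------------------------------------------------------------------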
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686f564/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 6f5c717..e84052f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -54,15 +56,15 @@ public class TestStoragePolicySatisfier {
   final private int storagesPerDatanode = 2;
   final private long capacity = 2 * 256 * 1024 * 1024;
   final private String file = "/testMoveWhenStoragePolicyNotSatisfying";
-  private DistributedFileSystem distributedFS = null;
+  private DistributedFileSystem dfs = null;
 
   @Before
   public void setUp() throws IOException {
     config.setLong("dfs.block.size", 1024);
     hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
         storagesPerDatanode, capacity);
-    distributedFS = hdfsCluster.getFileSystem();
-    writeContent(distributedFS, file);
+    dfs = hdfsCluster.getFileSystem();
+    writeContent(file);
   }
 
   @Test(timeout = 300000)
@@ -71,7 +73,7 @@
     try {
       // Change policy to ALL_SSD
-      distributedFS.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), "COLD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
@@ -86,8 +88,7 @@
       hdfsCluster.triggerHeartbeats();
 
       // Wait till namenode notified about the block location details
-      waitExpectedStorageType(file, StorageType.ARCHIVE, distributedFS, 3,
-          30000);
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -98,7 +99,7 @@
       throws Exception {
     try {
       // Change policy to ALL_SSD
-      distributedFS.setStoragePolicy(new Path(file), "ALL_SSD");
+      dfs.setStoragePolicy(new Path(file), "ALL_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
@@ -115,7 +116,7 @@
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 3, 30000);
+      waitExpectedStorageType(file, StorageType.SSD, 3, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -126,7 +127,7 @@
       throws Exception {
     try {
       // Change policy to ONE_SSD
-      distributedFS.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
@@ -141,8 +142,8 @@
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
+      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -156,7 +157,7 @@
   public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
     try {
       // Change policy to ONE_SSD
-      distributedFS.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
      INode inode = namesystem.getFSDirectory().getINode(file);
@@ -171,8 +172,8 @@
       hdfsCluster.triggerHeartbeats();
 
       // Wait till the block is moved to SSD areas
-      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
+      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
@@ -180,7 +181,58 @@
     }
   }
 
-  private void waitForBlocksMovementResult(int expectedResultsCount,
+  /**
+   * Tests to verify that multiple files are giving to satisfy storage policy
+   * and should work well altogether.
+   */
+  @Test(timeout = 300000)
+  public void testMultipleFilesForSatisfyStoragePolicy() throws Exception {
+    List<String> files = new ArrayList<>();
+    files.add(file);
+
+    // Creates 4 more files. Send all of them for satisfying the storage policy
+    // together.
+    for (int i = 0; i < 4; i++) {
+      String file1 = "/testMoveWhenStoragePolicyNotSatisfying_" + i;
+      files.add(file1);
+      writeContent(file1);
+    }
+
+    try {
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      List<Long> blockCollectionIds = new ArrayList<>();
+      // Change policy to ONE_SSD
+      for (String fileName : files) {
+        dfs.setStoragePolicy(new Path(fileName), "ONE_SSD");
+        INode inode = namesystem.getFSDirectory().getINode(fileName);
+        blockCollectionIds.add(inode.getId());
+      }
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Making sure SDD based nodes added to cluster. Adding SSD based
+      // datanodes.
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+      for (long inodeId : blockCollectionIds) {
+        namesystem.getBlockManager().satisfyStoragePolicy(inodeId);
+      }
+      hdfsCluster.triggerHeartbeats();
+
+      for (String fileName : files) {
+        // Wait till the block is moved to SSD areas
+        waitExpectedStorageType(fileName, StorageType.SSD, 1, 30000);
+        waitExpectedStorageType(fileName, StorageType.DISK, 2, 30000);
+      }
+
+      waitForBlocksMovementResult(blockCollectionIds.size(), 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  private void waitForBlocksMovementResult(long expectedBlkMovResultsCount,
       int timeout) throws TimeoutException, InterruptedException {
     BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
     final StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
@@ -188,16 +240,15 @@
       @Override
       public Boolean get() {
         LOG.info("expectedResultsCount={} actualResultsCount={}",
-            expectedResultsCount,
+            expectedBlkMovResultsCount,
             sps.getAttemptedItemsMonitor().resultsCount());
-        return expectedResultsCount == sps.getAttemptedItemsMonitor()
-            .resultsCount();
+        return sps.getAttemptedItemsMonitor()
+            .resultsCount() == expectedBlkMovResultsCount;
       }
     }, 100, timeout);
   }
 
-  private void writeContent(final DistributedFileSystem dfs,
-      final String fileName) throws IOException {
+  private void writeContent(final String fileName) throws IOException {
     // write to DISK
     final FSDataOutputStream out = dfs.create(new Path(fileName));
     for (int i = 0; i < 1000; i++) {
@@ -243,8 +294,8 @@
   // Check whether the Block movement has been successfully completed to satisfy
   // the storage policy for the given file.
   private void waitExpectedStorageType(final String fileName,
-      final StorageType expectedStorageType, final DistributedFileSystem dfs,
-      int expectedStorageCount, int timeout) throws Exception {
+      final StorageType expectedStorageType, int expectedStorageCount,
+      int timeout) throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
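The tests above poll with GenericTestUtils.waitFor(Supplier, checkEveryMillis, timeoutMillis) until a condition holds or the timeout expires. For readers without the Hadoop test utilities at hand, a rough stand-alone equivalent of that helper looks like the sketch below (simplified: the real utility reports richer diagnostics, and Hadoop of this era used Guava's Supplier rather than java.util.function).

----------------------------------------------------------------------
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public class WaitForSketch {

  // Polls check every intervalMs until it returns true, or fails with
  // TimeoutException once timeoutMs has elapsed.
  static void waitFor(Supplier<Boolean> check, long intervalMs, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!check.get()) {
      if (System.currentTimeMillis() >= deadline) {
        throw new TimeoutException(
            "Condition not satisfied within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Becomes true after ~300 ms; mirrors how the tests wait for the
    // expected number of storages of a given type to appear.
    waitFor(() -> System.currentTimeMillis() - start > 300, 100, 30000);
    System.out.println("condition satisfied");
  }
}
----------------------------------------------------------------------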