From: stack@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Thu, 25 May 2017 06:32:11 -0000
In-Reply-To: <7f268bdf025d402482c9ad679ba54082@git.apache.org>
References: <7f268bdf025d402482c9ad679ba54082@git.apache.org>
Subject: [07/27] hbase git commit: Revert "HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)"

Revert a mistaken commit!!!

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java
deleted file mode 100644
index c318ffc..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package org.apache.hadoop.hbase.client; - -import static org.junit.Assert.assertEquals; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CategoryBasedTimeout; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.ipc.ServerTooBusyException; -import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -/** - * This class is for testing HBaseConnectionManager ServerBusyException. - * Be careful adding to this class. It sets a low - * HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD - */ -@Category({LargeTests.class}) -public class TestServerBusyException { - @Rule public final TestRule timeout = CategoryBasedTimeout.builder() - .withTimeout(this.getClass()) - .withLookingForStuckThread(true) - .build(); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final byte[] FAM_NAM = Bytes.toBytes("f"); - private static final byte[] ROW = Bytes.toBytes("bbb"); - private static final int RPC_RETRY = 5; - - @Rule - public TestName name = new TestName(); - - public static class SleepCoprocessor implements RegionObserver { - public static final int SLEEP_TIME = 5000; - @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { - Threads.sleep(SLEEP_TIME); - } - - @Override - public void prePut(final ObserverContext e, - final Put put, final WALEdit edit, final Durability durability) throws IOException { - Threads.sleep(SLEEP_TIME); - } - - @Override - public Result preIncrement(final ObserverContext e, - final Increment increment) throws IOException { - Threads.sleep(SLEEP_TIME); - return null; - } - - @Override - public void preDelete(final ObserverContext e, final Delete delete, - final WALEdit edit, final Durability durability) throws IOException { - Threads.sleep(SLEEP_TIME); - } - - } - - public static class SleepLongerAtFirstCoprocessor implements RegionObserver { - public static final int SLEEP_TIME = 2000; - static final AtomicLong ct = new AtomicLong(0); - @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { - // After first sleep, all requests are timeout except the last retry. If we handle - // all the following requests, finally the last request is also timeout. If we drop all - // timeout requests, we can handle the last request immediately and it will not timeout. 
- if (ct.incrementAndGet() <= 1) { - Threads.sleep(SLEEP_TIME * RPC_RETRY * 2); - } else { - Threads.sleep(SLEEP_TIME); - } - } - } - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); - // Up the handlers; this test needs more than usual. - TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); - TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, RPC_RETRY); - // simulate queue blocking in testDropTimeoutRequest - TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 1); - // Needed by the server busy test. - TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD, 3); - TEST_UTIL.startMiniCluster(2); - } - - @AfterClass public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - private class TestPutThread extends Thread { - Table table; - int getServerBusyException = 0; - - TestPutThread(Table table){ - this.table = table; - } - - @Override - public void run() { - try { - Put p = new Put(ROW); - p.addColumn(FAM_NAM, new byte[]{0}, new byte[]{0}); - table.put(p); - } catch (RetriesExhaustedWithDetailsException e) { - if (e.exceptions.get(0) instanceof ServerTooBusyException) { - getServerBusyException = 1; - } - } catch (IOException ignore) { - } - } - } - - private class TestGetThread extends Thread { - Table table; - int getServerBusyException = 0; - - TestGetThread(Table table){ - this.table = table; - } - - @Override - public void run() { - try { - Get g = new Get(ROW); - g.addColumn(FAM_NAM, new byte[] { 0 }); - table.get(g); - } catch (ServerTooBusyException e) { - getServerBusyException = 1; - } catch (IOException ignore) { - } - } - } - - @Test() - public void testServerBusyException() throws Exception { - HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName())); - hdt.addCoprocessor(SleepCoprocessor.class.getName()); - Configuration c = new Configuration(TEST_UTIL.getConfiguration()); - TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c); - - TestGetThread tg1 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg2 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg3 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg4 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg5 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - tg1.start(); - tg2.start(); - tg3.start(); - tg4.start(); - tg5.start(); - tg1.join(); - tg2.join(); - tg3.join(); - tg4.join(); - tg5.join(); - assertEquals(2, - tg1.getServerBusyException + tg2.getServerBusyException + tg3.getServerBusyException - + tg4.getServerBusyException + tg5.getServerBusyException); - - // Put has its own logic in HTable, test Put alone. We use AsyncProcess for Put (use multi at - // RPC level) and it wrap exceptions to RetriesExhaustedWithDetailsException. 
- - TestPutThread tp1 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp2 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp3 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp4 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp5 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - tp1.start(); - tp2.start(); - tp3.start(); - tp4.start(); - tp5.start(); - tp1.join(); - tp2.join(); - tp3.join(); - tp4.join(); - tp5.join(); - assertEquals(2, - tp1.getServerBusyException + tp2.getServerBusyException + tp3.getServerBusyException - + tp4.getServerBusyException + tp5.getServerBusyException); - } -} http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index aef67bf..66c5abf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -33,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java index 10f466d..852c5cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java @@ -17,24 +17,19 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; +import 
org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -42,9 +37,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import java.io.IOException; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + @Category({MediumTests.class, ClientTests.class}) public class TestSplitOrMergeStatus { + private static final Log LOG = LogFactory.getLog(TestSplitOrMergeStatus.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static byte [] FAMILY = Bytes.toBytes("testFamily"); @@ -74,7 +77,7 @@ public class TestSplitOrMergeStatus { TEST_UTIL.loadTable(t, FAMILY, false); RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName()); - int originalCount = locator.getAllRegionLocations().size(); + int orignalCount = locator.getAllRegionLocations().size(); Admin admin = TEST_UTIL.getAdmin(); initSwitchStatus(admin); @@ -82,17 +85,14 @@ public class TestSplitOrMergeStatus { assertEquals(results.length, 1); assertTrue(results[0]); admin.split(t.getName()); - int count = admin.getTableRegions(tableName).size(); - assertTrue(originalCount == count); + int count = waitOnSplitOrMerge(t).size(); + assertTrue(orignalCount == count); results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT); assertEquals(results.length, 1); assertFalse(results[0]); admin.split(t.getName()); - while ((count = admin.getTableRegions(tableName).size()) == originalCount) { - Threads.sleep(1);; - } - count = admin.getTableRegions(tableName).size(); - assertTrue(originalCount < count); + count = waitOnSplitOrMerge(t).size(); + assertTrue(orignalCount parentFN = fnm.getFavoredNodes(parent); assertNotNull("FN should not be null for region: " + parent, parentFN); - LOG.info("SPLITTING TABLE"); admin.split(tableName, splitPoint); TEST_UTIL.waitUntilNoRegionsInTransition(WAIT_TIMEOUT); - LOG.info("FINISHED WAITING ON RIT"); - waitUntilTableRegionCountReached(tableName, numberOfRegions + 1); + waitUntilTableRegionCountReached(tableName, NUM_REGIONS + 1); - // All regions should have favored nodes checkIfFavoredNodeInformationIsCorrect(tableName); + // All regions should have favored nodes + checkIfFavoredNodeInformationIsCorrect(tableName); // Get the daughters of parent. HRegionInfo daughter1 = locator.getRegionLocation(parent.getStartKey(), true).getRegionInfo(); @@ -207,18 +206,11 @@ public class TestTableFavoredNodes { // Major compact table and run catalog janitor. Parent's FN should be removed TEST_UTIL.getMiniHBaseCluster().compact(tableName, true); - admin.runCatalogScan(); - // Catalog cleanup is async. Wait on procedure to finish up. 
- ProcedureTestingUtility.waitAllProcedures( - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); - // assertEquals("Parent region should have been cleaned", 1, admin.runCatalogScan()); + assertEquals("Parent region should have been cleaned", 1, admin.runCatalogScan()); assertNull("Parent FN should be null", fnm.getFavoredNodes(parent)); List regions = admin.getTableRegions(tableName); - // Split and Table Disable interfere with each other around region replicas - // TODO. Meantime pause a few seconds. - Threads.sleep(2000); - LOG.info("STARTING DELETE"); + TEST_UTIL.deleteTable(tableName); checkNoFNForDeletedTable(regions); @@ -243,12 +235,11 @@ public class TestTableFavoredNodes { LOG.info("regionA: " + regionA.getEncodedName() + " with FN: " + fnm.getFavoredNodes(regionA)); LOG.info("regionB: " + regionA.getEncodedName() + " with FN: " + fnm.getFavoredNodes(regionB)); - int countOfRegions = MetaTableAccessor.getRegionCount(TEST_UTIL.getConfiguration(), tableName); admin.mergeRegionsAsync(regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false).get(60, TimeUnit.SECONDS); TEST_UTIL.waitUntilNoRegionsInTransition(WAIT_TIMEOUT); - waitUntilTableRegionCountReached(tableName, countOfRegions - 1); + waitUntilTableRegionCountReached(tableName, NUM_REGIONS - 1); // All regions should have favored nodes checkIfFavoredNodeInformationIsCorrect(tableName); @@ -263,9 +254,6 @@ public class TestTableFavoredNodes { // Major compact table and run catalog janitor. Parent FN should be removed TEST_UTIL.getMiniHBaseCluster().compact(tableName, true); assertEquals("Merge parents should have been cleaned", 1, admin.runCatalogScan()); - // Catalog cleanup is async. Wait on procedure to finish up. - ProcedureTestingUtility.waitAllProcedures( - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); assertNull("Parent FN should be null", fnm.getFavoredNodes(regionA)); assertNull("Parent FN should be null", fnm.getFavoredNodes(regionB)); @@ -278,7 +266,6 @@ public class TestTableFavoredNodes { private void checkNoFNForDeletedTable(List regions) { for (HRegionInfo region : regions) { - LOG.info("Testing if FN data for " + region); assertNull("FN not null for deleted table's region: " + region, fnm.getFavoredNodes(region)); } } @@ -389,8 +376,8 @@ public class TestTableFavoredNodes { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return MetaTableAccessor.getRegionCount(TEST_UTIL.getConfiguration(), tableName) == numRegions; + return regionStates.getRegionsOfTable(tableName).size() == numRegions; } }); } -} +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java index 8805337..35ed531 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java @@ -32,6 +32,7 @@ import java.util.NavigableMap; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; +import 
org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; @@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -84,9 +84,6 @@ public class TestIncrementTimeRange { public static void setupBeforeClass() throws Exception { util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyObserver.class.getName()); - // Make general delay zero rather than default. Timing is off in this - // test that depends on an evironment edge that is manually moved forward. - util.getConfiguration().setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 0); util.startMiniCluster(); EnvironmentEdgeManager.injectEdge(mee); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index a1b33f7..1d7c12e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -49,7 +49,6 @@ import java.util.concurrent.LinkedBlockingQueue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Put; @@ -67,19 +66,17 @@ import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestRule; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @Category({RPCTests.class, SmallTests.class}) -public class TestSimpleRpcScheduler { +public class TestSimpleRpcScheduler {/* @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
- withLookingForStuckThread(true).build(); + withLookingForStuckThread(true).build();*/ private static final Log LOG = LogFactory.getLog(TestSimpleRpcScheduler.class); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index b6ad2c9..7f1723c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -26,6 +26,7 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Deque; import java.util.List; import java.util.Locale; import java.util.Map; @@ -59,7 +60,6 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 626e154..ff6b88e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -32,17 +32,14 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.executor.ExecutorService; -import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.locking.LockManager; +import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.LockInfo; -import org.apache.hadoop.hbase.procedure2.ProcedureEvent; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.replication.ReplicationException; @@ -377,11 +374,6 @@ public class MockNoopMasterServices implements MasterServices, Server { } @Override - public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { - return false; - } - - @Override public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) throws ReplicationException { } @@ -434,16 +426,4 @@ public class MockNoopMasterServices implements MasterServices, Server { public LockManager getLockManager() { 
return null; } - - @Override - public long dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, boolean forcible, long nonceGroup, - long nonce) throws IOException { - return 0; - } - - @Override - public ProcedureEvent getInitializedEvent() { - // TODO Auto-generated method stub - return null; - } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 7ac7571..67a3f0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.locking.EntityLock; @@ -53,10 +54,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompac import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -69,8 +70,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; @@ -503,6 +502,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override + public CloseRegionForSplitOrMergeResponse 
closeRegionForSplitOrMerge( + RpcController controller, + CloseRegionForSplitOrMergeRequest request) throws ServiceException { + return null; + } + + @Override public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request) throws ServiceException { // TODO Auto-generated method stub @@ -510,6 +516,16 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override + public long requestRegionSplit(HRegionInfo regionInfo, byte[] splitRow) { + return -1; + } + + @Override + public boolean isProcedureFinished(final long procId) { + return false; + } + + @Override public SplitRegionResponse splitRegion(RpcController controller, SplitRegionRequest request) throws ServiceException { return null; @@ -722,22 +738,9 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public ExecuteProceduresResponse executeProcedures(RpcController controller, - ExecuteProceduresRequest request) throws ServiceException { - return null; - } - - @Override public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots( RpcController controller, GetSpaceQuotaSnapshotsRequest request) throws ServiceException { return null; } - - @Override - public MergeRegionsResponse mergeRegions(RpcController controller, MergeRegionsRequest request) - throws ServiceException { - // TODO Auto-generated method stub - return null; - } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index 95c0615..78b75d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MasterTests;