Return-Path: X-Original-To: archive-asf-public-internal@cust-asf2.ponee.io Delivered-To: archive-asf-public-internal@cust-asf2.ponee.io Received: from cust-asf.ponee.io (cust-asf.ponee.io [163.172.22.183]) by cust-asf2.ponee.io (Postfix) with ESMTP id 3FC1A200CFC for ; Thu, 28 Sep 2017 14:30:44 +0200 (CEST) Received: by cust-asf.ponee.io (Postfix) id 3DFC11609EE; Thu, 28 Sep 2017 12:30:44 +0000 (UTC) Delivered-To: archive-asf-public@cust-asf.ponee.io Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by cust-asf.ponee.io (Postfix) with SMTP id DF5F21609EF for ; Thu, 28 Sep 2017 14:30:41 +0200 (CEST) Received: (qmail 80799 invoked by uid 500); 28 Sep 2017 12:30:34 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 80323 invoked by uid 99); 28 Sep 2017 12:30:34 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 28 Sep 2017 12:30:34 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id E9F40F5BD4; Thu, 28 Sep 2017 12:30:31 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: chia7712@apache.org To: commits@hbase.apache.org Date: Thu, 28 Sep 2017 12:30:33 -0000 Message-Id: <5603c412c2cf444d98ac656bd082528b@git.apache.org> In-Reply-To: <7bdfef497ba64382b13e8db024a194b2@git.apache.org> References: <7bdfef497ba64382b13e8db024a194b2@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [03/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base archived-at: Thu, 28 Sep 2017 12:30:44 -0000 http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java 
---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java index 3ad8ec8..6f109e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java @@ -23,10 +23,10 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.CategoryBasedTimeout; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; @@ -75,7 +75,7 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( procExec, tableName, null, "f"); UTIL.getAdmin().disableTable(tableName); @@ -118,7 +118,7 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { } private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) throws Exception { - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); 
UTIL.getAdmin().disableTable(tableName); @@ -136,7 +136,7 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { // create the table byte[][] splitKeys = null; - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); UTIL.getAdmin().disableTable(tableName); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java index 4e96cea..3eeb382 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java @@ -26,18 +26,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState; -import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; @@ -50,6 +45,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestRule; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState; + @Category({MasterTests.class, LargeTests.class}) public class TestMasterFailoverWithProcedures { private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class); @@ -113,7 +114,7 @@ public class TestMasterFailoverWithProcedures { // Start the Create procedure && kill the executor byte[][] splitKeys = null; TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); - HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); + RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys); long procId = procExec.submitProcedure( new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); testRecoveryAndDoubleExecution(UTIL, procId, step); @@ -140,7 +141,7 @@ public class TestMasterFailoverWithProcedures { // create the table byte[][] splitKeys = null; - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( 
+ RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); Path tableDir = FSUtils.getTableDir(getRootDir(), tableName); MasterProcedureTestingUtility.validateTableCreation( @@ -182,7 +183,7 @@ public class TestMasterFailoverWithProcedures { final byte[][] splitKeys = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, families); // load and verify that there are rows in the table MasterProcedureTestingUtility.loadData( @@ -203,7 +204,7 @@ public class TestMasterFailoverWithProcedures { UTIL.waitUntilAllRegionsAssigned(tableName); // validate the table regions and layout - regions = UTIL.getAdmin().getTableRegions(tableName).toArray(new HRegionInfo[0]); + regions = UTIL.getAdmin().getTableRegions(tableName).toArray(new RegionInfo[0]); if (preserveSplits) { assertEquals(1 + splitKeys.length, regions.length); } else { http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java index 68013fb..86f0abc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java @@ -30,8 +30,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CategoryBasedTimeout; import 
org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -45,9 +45,9 @@ import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; -import org.junit.Ignore; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.junit.rules.TestRule; @@ -136,7 +136,7 @@ public class TestMasterProcedureWalLease { // Try to trigger a command on the master (WAL lease expired on the active one) TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f"); - HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null); + RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null); LOG.debug("submit proc"); try { getMasterProcedureExecutor().submitProcedure( http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index 8e77985..8b58646 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -25,9 +25,9 @@ import static org.junit.Assert.assertTrue; import org.apache.hadoop.hbase.CategoryBasedTimeout; import 
org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; @@ -190,7 +190,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( procExec, tableName, null, "cf1", cf3); UTIL.getAdmin().disableTable(tableName); @@ -230,7 +230,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( procExec, tableName, null, "cf1", cf3); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); @@ -268,7 +268,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( procExec, tableName, null, "cf1"); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); @@ -297,7 +297,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = 
MasterProcedureTestingUtility.createTable( procExec, tableName, null, "cf1"); UTIL.getAdmin().disableTable(tableName); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java index 38a12e8..bb531ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java @@ -29,9 +29,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; @@ -123,7 +123,7 @@ public class TestProcedureAdmin { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - HRegionInfo[] regions = + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); UTIL.getAdmin().disableTable(tableName); ProcedureTestingUtility.waitNoProcedureRunning(procExec); @@ -155,7 +155,7 @@ public class TestProcedureAdmin { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - HRegionInfo[] regions = + RegionInfo[] regions = 
MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java index c255843..08070ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java @@ -26,16 +26,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -96,7 +95,7 @@ public class TestSafemodeBringsDownMaster { final byte[][] splitKeys = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), 
Bytes.toBytes("c") }; - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); MiniDFSCluster dfsCluster = UTIL.getDFSCluster(); DistributedFileSystem dfs = (DistributedFileSystem) dfsCluster.getFileSystem(); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java index cebee98..c5a17a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java @@ -24,10 +24,10 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.CategoryBasedTimeout; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; @@ -101,7 +101,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), 
tableName, splitKeys, families); // load and verify that there are rows in the table MasterProcedureTestingUtility.loadData( @@ -119,7 +119,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { UTIL.waitUntilAllRegionsAssigned(tableName); // validate the table regions and layout - regions = UTIL.getAdmin().getTableRegions(tableName).toArray(new HRegionInfo[0]); + regions = UTIL.getAdmin().getTableRegions(tableName).toArray(new RegionInfo[0]); if (preserveSplits) { assertEquals(1 + splitKeys.length, regions.length); } else { @@ -157,7 +157,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { final byte[][] splitKeys = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; - HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + RegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, families); // load and verify that there are rows in the table MasterProcedureTestingUtility.loadData( @@ -181,7 +181,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { UTIL.waitUntilAllRegionsAssigned(tableName); // validate the table regions and layout - regions = UTIL.getAdmin().getTableRegions(tableName).toArray(new HRegionInfo[0]); + regions = UTIL.getAdmin().getTableRegions(tableName).toArray(new RegionInfo[0]); if (preserveSplits) { assertEquals(1 + splitKeys.length, regions.length); } else { http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java index 19e7137..15e3c9a 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java @@ -24,24 +24,26 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterables; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; import org.junit.After; import org.junit.AfterClass; @@ -49,6 +51,10 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterables; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; + /** * Test that we correctly reload the cache, filter directories, etc. */ @@ -180,7 +186,7 @@ public class TestSnapshotFileCache { SnapshotReferenceUtil .visitReferencedFiles(UTIL.getConfiguration(), fs, builder.getSnapshotsDir(), new SnapshotReferenceUtil.SnapshotVisitor() { - @Override public void storeFile(HRegionInfo regionInfo, String familyName, + @Override public void storeFile(RegionInfo regionInfo, String familyName, SnapshotProtos.SnapshotRegionManifest.StoreFile storeFile) throws IOException { FileStatus status = mockStoreFile(storeFile.getName()); allStoreFiles.add(status); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java index 60f2467..719b5e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java @@ -34,9 +34,8 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -61,7 +60,7 @@ public class TestFileSystemUtilizationChore { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new 
ExpectedRegionSizeSummationAnswer(sum(regionSizes))) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); final Region region = mockRegionWithSize(regionSizes); when(rs.getRegions()).thenReturn(Arrays.asList(region)); @@ -78,7 +77,7 @@ public class TestFileSystemUtilizationChore { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); final Region region = mockRegionWithSize(regionSizes); when(rs.getRegions()).thenReturn(Arrays.asList(region)); @@ -102,7 +101,7 @@ public class TestFileSystemUtilizationChore { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum, r2Sum, r3Sum)))) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); final Region r1 = mockRegionWithSize(r1Sizes); final Region r2 = mockRegionWithSize(r2Sizes); @@ -167,7 +166,7 @@ public class TestFileSystemUtilizationChore { }; doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum, leftover2Sum)))) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); // We shouldn't compute all of these region sizes, just the leftovers final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L)); @@ -199,7 +198,7 @@ public class TestFileSystemUtilizationChore { }; doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum)))) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); // We shouldn't compute all of these region sizes, just the leftovers final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L)); @@ -225,7 +224,7 @@ 
public class TestFileSystemUtilizationChore { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum)))) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); final Region r1 = mockRegionWithSize(r1Sizes); final Region r2 = mockSplitParentRegionWithSize(r2Sizes); @@ -247,7 +246,7 @@ public class TestFileSystemUtilizationChore { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new ExpectedRegionSizeSummationAnswer(r1Sum)) .when(rs) - .reportRegionSizesForQuotas((Map) any(Map.class)); + .reportRegionSizesForQuotas((Map) any(Map.class)); final Region r1 = mockRegionWithSize(r1Sizes); final Region r2 = mockRegionReplicaWithSize(r2Sizes); @@ -274,7 +273,7 @@ public class TestFileSystemUtilizationChore { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new ExpectedRegionSizeSummationAnswer( sum(Arrays.asList(r1HFileSizeSum, r2HFileSizeSum)))) - .when(rs).reportRegionSizesForQuotas((Map) any(Map.class)); + .when(rs).reportRegionSizesForQuotas((Map) any(Map.class)); final Region r1 = mockRegionWithHFileLinks(r1StoreFileSizes, r1HFileSizes); final Region r2 = mockRegionWithHFileLinks(r2StoreFileSizes, r2HFileSizes); @@ -321,7 +320,7 @@ public class TestFileSystemUtilizationChore { */ private Region mockRegionWithSize(Collection storeSizes) { final Region r = mock(Region.class); - final HRegionInfo info = mock(HRegionInfo.class); + final RegionInfo info = mock(RegionInfo.class); when(r.getRegionInfo()).thenReturn(info); List stores = new ArrayList<>(); when(r.getStores()).thenReturn((List) stores); @@ -335,7 +334,7 @@ public class TestFileSystemUtilizationChore { private Region mockRegionWithHFileLinks(Collection storeSizes, Collection hfileSizes) { final Region r = mock(Region.class); - final HRegionInfo info = mock(HRegionInfo.class); + final 
RegionInfo info = mock(RegionInfo.class); when(r.getRegionInfo()).thenReturn(info); List stores = new ArrayList<>(); when(r.getStores()).thenReturn((List) stores); @@ -363,7 +362,7 @@ public class TestFileSystemUtilizationChore { */ private Region mockSplitParentRegionWithSize(Collection storeSizes) { final Region r = mockRegionWithSize(storeSizes); - final HRegionInfo info = r.getRegionInfo(); + final RegionInfo info = r.getRegionInfo(); when(info.isSplitParent()).thenReturn(true); return r; } @@ -376,7 +375,7 @@ public class TestFileSystemUtilizationChore { */ private Region mockRegionReplicaWithSize(Collection storeSizes) { final Region r = mockRegionWithSize(storeSizes); - final HRegionInfo info = r.getRegionInfo(); + final RegionInfo info = r.getRegionInfo(); when(info.getReplicaId()).thenReturn(1); return r; } @@ -396,7 +395,7 @@ public class TestFileSystemUtilizationChore { Object[] args = invocation.getArguments(); assertEquals(1, args.length); @SuppressWarnings("unchecked") - Map regionSizes = (Map) args[0]; + Map regionSizes = (Map) args[0]; long sum = 0L; for (Long regionSize : regionSizes.values()) { sum += regionSize; http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java index a673bcb..303dad0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java @@ -33,12 +33,10 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import 
org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; @@ -47,6 +45,11 @@ import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; + /** * Test class for {@link NamespaceQuotaSnapshotStore}. */ @@ -56,7 +59,7 @@ public class TestNamespaceQuotaViolationStore { private Connection conn; private QuotaObserverChore chore; - private Map regionReports; + private Map regionReports; private NamespaceQuotaSnapshotStore store; @Before @@ -109,22 +112,38 @@ public class TestNamespaceQuotaViolationStore { // Create some junk data to filter. Makes sure it's so large that it would // immediately violate the quota. 
for (int i = 0; i < 3; i++) { - regionReports.put(new HRegionInfo(tn3, Bytes.toBytes(i), Bytes.toBytes(i + 1)), + + regionReports.put(RegionInfoBuilder.newBuilder(tn3) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 5L * ONE_MEGABYTE); } - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(0), Bytes.toBytes(1)), 1024L * 512L); - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(1), Bytes.toBytes(2)), 1024L * 256L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(0)) + .setEndKey(Bytes.toBytes(1)) + .build(), 1024L * 512L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(1)) + .setEndKey(Bytes.toBytes(2)) + .build(), 1024L * 256L); // Below the quota assertEquals(false, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); - regionReports.put(new HRegionInfo(tn2, Bytes.toBytes(2), Bytes.toBytes(3)), 1024L * 256L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2) + .setStartKey(Bytes.toBytes(2)) + .setEndKey(Bytes.toBytes(3)) + .build(), 1024L * 256L); // Equal to the quota is still in observance assertEquals(false, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); - regionReports.put(new HRegionInfo(tn2, Bytes.toBytes(3), Bytes.toBytes(4)), 1024L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2) + .setStartKey(Bytes.toBytes(3)) + .setEndKey(Bytes.toBytes(4)) + .build(), 1024L); // Exceeds the quota, should be in violation assertEquals(true, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); @@ -142,16 +161,28 @@ public class TestNamespaceQuotaViolationStore { assertEquals(0, size(store.filterBySubject("asdf"))); for (int i = 0; i < 5; i++) { - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < 3; i++) { - 
regionReports.put(new HRegionInfo(tn2, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < 10; i++) { - regionReports.put(new HRegionInfo(tn3, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn3) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < 8; i++) { - regionReports.put(new HRegionInfo(tn4, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn4) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } assertEquals(26, regionReports.size()); assertEquals(5, size(store.filterBySubject(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR))); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java index 74511f1..e82aee2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java @@ -23,9 +23,10 @@ import static org.mockito.Mockito.when; import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; @@ -57,17 +58,26 @@ public 
class TestQuotaObserverChore { final int numTable1Regions = 10; final int numTable2Regions = 15; final int numTable3Regions = 8; - Map regionReports = new HashMap<>(); + Map regionReports = new HashMap<>(); for (int i = 0; i < numTable1Regions; i++) { - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(i), Bytes.toBytes(i + 1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < numTable2Regions; i++) { - regionReports.put(new HRegionInfo(tn2, Bytes.toBytes(i), Bytes.toBytes(i + 1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < numTable3Regions; i++) { - regionReports.put(new HRegionInfo(tn3, Bytes.toBytes(i), Bytes.toBytes(i + 1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn3) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } TableQuotaSnapshotStore store = new TableQuotaSnapshotStore(conn, chore, regionReports); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java index 62c6b53..c57a89f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java @@ -31,13 +31,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import 
org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; @@ -175,9 +175,9 @@ public class TestQuotaObserverChoreRegionReports { }); // Close the region, prevent the server from sending new status reports. - List regions = admin.getTableRegions(tn); + List regions = admin.getRegions(tn); assertEquals(1, regions.size()); - HRegionInfo hri = regions.get(0); + RegionInfo hri = regions.get(0); admin.unassign(hri.getRegionName(), true); // We should see this table move out of violation after the report expires. 
@@ -218,9 +218,9 @@ public class TestQuotaObserverChoreRegionReports { } } - private int getRegionReportsForTable(Map reports, TableName tn) { + private int getRegionReportsForTable(Map reports, TableName tn) { int numReports = 0; - for (Entry entry : reports.entrySet()) { + for (Entry entry : reports.entrySet()) { if (tn.equals(entry.getKey().getTable())) { numReports++; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java index aeae80a..a5fe406 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java @@ -30,11 +30,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; @@ -94,7 +94,7 @@ public class TestQuotaStatusRPCs { Waiter.waitFor(TEST_UTIL.getConfiguration(), 30 * 1000, new Predicate() { @Override public boolean evaluate() throws Exception { - Map regionSizes = quotaManager.snapshotRegionSizes(); + Map regionSizes = quotaManager.snapshotRegionSizes(); LOG.trace("Region sizes=" + regionSizes); return 
numRegions == countRegionsForTable(tn, regionSizes) && tableSize <= getTableSize(tn, regionSizes); @@ -271,9 +271,9 @@ public class TestQuotaStatusRPCs { }); } - private int countRegionsForTable(TableName tn, Map regionSizes) { + private int countRegionsForTable(TableName tn, Map regionSizes) { int size = 0; - for (HRegionInfo regionInfo : regionSizes.keySet()) { + for (RegionInfo regionInfo : regionSizes.keySet()) { if (tn.equals(regionInfo.getTable())) { size++; } @@ -281,10 +281,10 @@ public class TestQuotaStatusRPCs { return size; } - private int getTableSize(TableName tn, Map regionSizes) { + private int getTableSize(TableName tn, Map regionSizes) { int tableSize = 0; - for (Entry entry : regionSizes.entrySet()) { - HRegionInfo regionInfo = entry.getKey(); + for (Entry entry : regionSizes.entrySet()) { + RegionInfo regionInfo = entry.getKey(); long regionSize = entry.getValue(); if (tn.equals(regionInfo.getTable())) { tableSize += regionSize; http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java index 8584d55..035216c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java @@ -30,13 +30,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; 
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -87,11 +87,11 @@ public class TestRegionSizeUse { admin.flush(tn); LOG.debug("Data flushed to disk"); // Get the final region distribution - final List regions = TEST_UTIL.getAdmin().getTableRegions(tn); + final List regions = TEST_UTIL.getAdmin().getRegions(tn); HMaster master = cluster.getMaster(); MasterQuotaManager quotaManager = master.getMasterQuotaManager(); - Map regionSizes = quotaManager.snapshotRegionSizes(); + Map regionSizes = quotaManager.snapshotRegionSizes(); // Wait until we get all of the region reports for our table // The table may split, so make sure we have at least as many as expected right after we // finished writing the data. @@ -181,9 +181,9 @@ public class TestRegionSizeUse { * @param regions A collection of region sizes * @return The number of regions for the given table. 
*/ - private int numRegionsForTable(TableName tn, Map regions) { + private int numRegionsForTable(TableName tn, Map regions) { int sum = 0; - for (Entry entry : regions.entrySet()) { + for (Entry entry : regions.entrySet()) { if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) { sum++; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java index 9f6c9f8..e923cc6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java @@ -38,7 +38,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; @@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RpcRetryingCaller; @@ -265,7 +265,7 @@ public class TestSpaceQuotas { HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); RegionServerSpaceQuotaManager spaceQuotaManager = rs.getRegionServerSpaceQuotaManager(); Map snapshots = spaceQuotaManager.copyQuotaSnapshots(); - Map regionSizes = getReportedSizesForTable(tn); + Map 
regionSizes = getReportedSizesForTable(tn); while (true) { SpaceQuotaSnapshot snapshot = snapshots.get(tn); if (snapshot != null && snapshot.getLimit() > 0) { @@ -348,11 +348,11 @@ public class TestSpaceQuotas { verifyViolation(policy, tn, p); } - private Map getReportedSizesForTable(TableName tn) { + private Map getReportedSizesForTable(TableName tn) { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); MasterQuotaManager quotaManager = master.getMasterQuotaManager(); - Map filteredRegionSizes = new HashMap<>(); - for (Entry entry : quotaManager.snapshotRegionSizes().entrySet()) { + Map filteredRegionSizes = new HashMap<>(); + for (Entry entry : quotaManager.snapshotRegionSizes().entrySet()) { if (entry.getKey().getTable().equals(tn)) { filteredRegionSizes.put(entry.getKey(), entry.getValue()); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java index fe7500d..5ba830d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java @@ -29,18 +29,15 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import 
org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; @@ -49,6 +46,11 @@ import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; + /** * Test class for {@link TableQuotaSnapshotStore}. 
*/ @@ -58,7 +60,7 @@ public class TestTableQuotaViolationStore { private Connection conn; private QuotaObserverChore chore; - private Map regionReports; + private Map regionReports; private TableQuotaSnapshotStore store; @Before @@ -78,13 +80,22 @@ public class TestTableQuotaViolationStore { assertEquals(0, size(store.filterBySubject(tn1))); for (int i = 0; i < 5; i++) { - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < 3; i++) { - regionReports.put(new HRegionInfo(tn2, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } for (int i = 0; i < 10; i++) { - regionReports.put(new HRegionInfo(tn3, Bytes.toBytes(i), Bytes.toBytes(i+1)), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn3) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 0L); } assertEquals(18, regionReports.size()); assertEquals(5, size(store.filterBySubject(tn1))); @@ -106,14 +117,23 @@ public class TestTableQuotaViolationStore { // Create some junk data to filter. Makes sure it's so large that it would // immediately violate the quota. 
for (int i = 0; i < 3; i++) { - regionReports.put(new HRegionInfo(tn2, Bytes.toBytes(i), Bytes.toBytes(i + 1)), - 5L * ONE_MEGABYTE); - regionReports.put(new HRegionInfo(tn3, Bytes.toBytes(i), Bytes.toBytes(i + 1)), - 5L * ONE_MEGABYTE); + regionReports.put(RegionInfoBuilder.newBuilder(tn2) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 5L * ONE_MEGABYTE); + regionReports.put(RegionInfoBuilder.newBuilder(tn3) + .setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)) + .build(), 5L * ONE_MEGABYTE); } - - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(0), Bytes.toBytes(1)), 1024L * 512L); - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(1), Bytes.toBytes(2)), 1024L * 256L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(0)) + .setEndKey(Bytes.toBytes(1)) + .build(), 1024L * 512L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(1)) + .setEndKey(Bytes.toBytes(2)) + .build(), 1024L * 256L); SpaceQuotaSnapshot tn1Snapshot = new SpaceQuotaSnapshot( SpaceQuotaStatus.notInViolation(), 1024L * 768L, 1024L * 1024L); @@ -121,13 +141,20 @@ public class TestTableQuotaViolationStore { // Below the quota assertEquals(tn1Snapshot, store.getTargetState(tn1, quota)); - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(2), Bytes.toBytes(3)), 1024L * 256L); + + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(2)) + .setEndKey(Bytes.toBytes(3)) + .build(), 1024L * 256L); tn1Snapshot = new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 1024L * 1024L, 1024L * 1024L); // Equal to the quota is still in observance assertEquals(tn1Snapshot, store.getTargetState(tn1, quota)); - regionReports.put(new HRegionInfo(tn1, Bytes.toBytes(3), Bytes.toBytes(4)), 1024L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1) + .setStartKey(Bytes.toBytes(3)) + .setEndKey(Bytes.toBytes(4)) + .build(), 1024L); tn1Snapshot = new 
SpaceQuotaSnapshot( new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L * 1024L + 1024L, 1024L * 1024L); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java index 2de649b..6fcbf77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java @@ -32,10 +32,10 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Before; @@ -176,7 +176,7 @@ public class TestTablesWithQuotas { when(admin.getTableRegions(missingTable)).thenReturn(null); QuotaObserverChore chore = mock(QuotaObserverChore.class); - Map regionUsage = new HashMap<>(); + Map regionUsage = new HashMap<>(); TableQuotaSnapshotStore store = new TableQuotaSnapshotStore(conn, chore, regionUsage); // A super dirty hack to verify that, after getting no regions for our table, http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java index 6a7e98b..674c3e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; @@ -97,7 +98,7 @@ public class TestAtomicOperation { static final byte [] row = Bytes.toBytes("rowA"); static final byte [] row2 = Bytes.toBytes("rowB"); - @Before + @Before public void setup() { tableName = Bytes.toBytes(name.getMethodName()); } @@ -115,7 +116,7 @@ public class TestAtomicOperation { } ////////////////////////////////////////////////////////////////////////////// // New tests that doesn't spin up a mini cluster but rather just test the - // individual code pieces in the HRegion. + // individual code pieces in the HRegion. 
////////////////////////////////////////////////////////////////////////////// /** @@ -289,7 +290,7 @@ public class TestAtomicOperation { Result result = region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE); if (result != null) { assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, - Bytes.toLong(result.getValue(fam1, qual2))); + Bytes.toLong(result.getValue(fam1, qual2))); assertTrue(result.getValue(fam2, qual3) != null); assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3))); @@ -335,8 +336,8 @@ public class TestAtomicOperation { Get g = new Get(row); Result result = region.get(g); - assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length); - assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length); + assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length); + assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length); } catch (IOException e) { e.printStackTrace(); failures.incrementAndGet(); @@ -568,7 +569,7 @@ public class TestAtomicOperation { this.failures = failures; } } - + private static CountDownLatch latch = new CountDownLatch(1); private enum TestStep { INIT, // initial put of 10 to set value of the cell @@ -580,11 +581,11 @@ public class TestAtomicOperation { } private static volatile TestStep testStep = TestStep.INIT; private final String family = "f1"; - + /** * Test written as a verifier for HBASE-7051, CheckAndPut should properly read - * MVCC. - * + * MVCC. 
+ * * Moved into TestAtomicOperation from its original location, TestHBase7051 */ @Test @@ -598,7 +599,7 @@ public class TestAtomicOperation { Put put = new Put(Bytes.toBytes("r1")); put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10")); puts[0] = put; - + region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE); MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); @@ -661,7 +662,7 @@ public class TestAtomicOperation { public static class MockHRegion extends HRegion { public MockHRegion(Path tableDir, WAL log, FileSystem fs, Configuration conf, - final HRegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) { + final RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) { super(tableDir, log, fs, conf, regionInfo, htd, rsServices); } @@ -672,7 +673,7 @@ public class TestAtomicOperation { } return new WrappedRowLock(super.getRowLockInternal(row, readLock)); } - + public class WrappedRowLock implements RowLock { private final RowLock rowLock; http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index b36b8fe..e40ff8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import 
org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -988,8 +989,7 @@ public class TestDefaultMemStore { final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList<>(2); cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER, now, - r.getRegionInfo().toByteArray())); + HConstants.REGIONINFO_QUALIFIER, now, RegionInfo.toByteArray(r.getRegionInfo()))); // Set into the root table the version of the meta table. cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER, now, http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index ab60d9d..6a41742 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -28,6 +27,7 @@ import java.util.List; import java.util.Random; import java.util.Set; import java.util.TreeSet; +import java.util.stream.Collectors; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; @@ -36,7 +36,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotServingRegionException; @@ -48,16 +47,10 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -69,10 +62,10 @@ import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterators; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; -import org.junit.rules.TestName; @Category(LargeTests.class) public class TestEndToEndSplitTransaction { @@ -154,7 +147,7 @@ public class TestEndToEndSplitTransaction { try { Random random = new Random(); for (int i= 0; i< 5; i++) { - List regions = + List regions = MetaTableAccessor.getTableRegions(connection, tableName, true); if (regions.isEmpty()) { 
continue; @@ -162,7 +155,7 @@ public class TestEndToEndSplitTransaction { int regionIndex = random.nextInt(regions.size()); //pick a random region and split it into two - HRegionInfo region = Iterators.get(regions.iterator(), regionIndex); + RegionInfo region = Iterators.get(regions.iterator(), regionIndex); //pick the mid split point int start = 0, end = Integer.MAX_VALUE; @@ -227,10 +220,10 @@ public class TestEndToEndSplitTransaction { /** verify region boundaries obtained from MetaScanner */ void verifyRegionsUsingMetaTableAccessor() throws Exception { - List regionList = MetaTableAccessor.getTableRegions(connection, tableName, true); - verifyTableRegions(Sets.newTreeSet(regionList)); + List regionList = MetaTableAccessor.getTableRegions(connection, tableName, true); + verifyTableRegions(regionList.stream().collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR)))); regionList = MetaTableAccessor.getAllRegions(connection, true); - verifyTableRegions(Sets.newTreeSet(regionList)); + verifyTableRegions(regionList.stream().collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR)))); } /** verify region boundaries obtained from HTable.getStartEndKeys() */ @@ -244,8 +237,7 @@ public class TestEndToEndSplitTransaction { Pair keys = rl.getStartEndKeys(); verifyStartEndKeys(keys); - //HTable.getRegionsInfo() - Set regions = new TreeSet<>(); + Set regions = new TreeSet<>(RegionInfo.COMPARATOR); for (HRegionLocation loc : rl.getAllRegionLocations()) { regions.add(loc.getRegionInfo()); } @@ -262,14 +254,14 @@ public class TestEndToEndSplitTransaction { verifyRegionsUsingHTable(); } - void verifyTableRegions(Set regions) { + void verifyTableRegions(Set regions) { log("Verifying " + regions.size() + " regions: " + regions); byte[][] startKeys = new byte[regions.size()][]; byte[][] endKeys = new byte[regions.size()][]; int i=0; - for (HRegionInfo region : regions) { + for (RegionInfo region : regions) { startKeys[i] = region.getStartKey(); 
endKeys[i] = region.getEndKey(); i++; @@ -352,21 +344,21 @@ public class TestEndToEndSplitTransaction { throws IOException, InterruptedException { long start = System.currentTimeMillis(); log("blocking until region is split:" + Bytes.toStringBinary(regionName)); - HRegionInfo daughterA = null, daughterB = null; + RegionInfo daughterA = null, daughterB = null; try (Connection conn = ConnectionFactory.createConnection(conf); Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { Result result = null; - HRegionInfo region = null; + RegionInfo region = null; while ((System.currentTimeMillis() - start) < timeout) { result = metaTable.get(new Get(regionName)); if (result == null) { break; } - region = MetaTableAccessor.getHRegionInfo(result); + region = MetaTableAccessor.getRegionInfo(result); if (region.isSplitParent()) { log("found parent region: " + region.toString()); - PairOfSameType<HRegionInfo> pair = MetaTableAccessor.getDaughterRegions(result); + PairOfSameType<RegionInfo> pair = MetaTableAccessor.getDaughterRegions(result); daughterA = pair.getFirst(); daughterB = pair.getSecond(); break; @@ -396,7 +388,7 @@ public class TestEndToEndSplitTransaction { } } - public static void blockUntilRegionIsInMeta(Connection conn, long timeout, HRegionInfo hri) + public static void blockUntilRegionIsInMeta(Connection conn, long timeout, RegionInfo hri) throws IOException, InterruptedException { log("blocking until region is in META: " + hri.getRegionNameAsString()); long start = System.currentTimeMillis(); @@ -410,7 +402,7 @@ public class TestEndToEndSplitTransaction { } } - public static void blockUntilRegionIsOpened(Configuration conf, long timeout, HRegionInfo hri) + public static void blockUntilRegionIsOpened(Configuration conf, long timeout, RegionInfo hri) throws IOException, InterruptedException { log("blocking until region is opened for reading:" + hri.getRegionNameAsString()); long start = System.currentTimeMillis();
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 9fcdf56..4d557b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; @@ -5822,7 +5823,7 @@ public class TestHRegion { // Helper for test testOpenRegionWrittenToWALForLogReplay static class HRegionWithSeqId extends HRegion { public HRegionWithSeqId(final Path tableDir, final WAL wal, final FileSystem fs, - final Configuration confParam, final HRegionInfo regionInfo, + final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd, final RegionServerServices rsServices) { super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 477c870..829b488 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -41,11 +41,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -182,7 +183,7 @@ public class TestHRegionFileSystem { assertEquals(1, regionDirs.size()); List familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0)); assertEquals(2, familyDirs.size()); - HRegionInfo hri = table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo(); + RegionInfo hri = table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo(); HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri); return regionFs; } @@ -194,7 +195,7 @@ public class TestHRegionFileSystem { Configuration conf = TEST_UTIL.getConfiguration(); // Create a Region - HRegionInfo hri = new HRegionInfo(TableName.valueOf(name.getMethodName())); + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, FSUtils.getTableDir(rootDir, hri.getTable()), hri); @@ -203,7 +204,7 @@ public class TestHRegionFileSystem { assertTrue("The region folder should be created", fs.exists(regionDir)); // Verify the .regioninfo - 
HRegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); + RegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); assertEquals(hri, hriVerify); // Open the region @@ -226,7 +227,7 @@ public class TestHRegionFileSystem { Configuration conf = TEST_UTIL.getConfiguration(); // Create a Region - HRegionInfo hri = new HRegionInfo(TableName.valueOf(name.getMethodName())); + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); assertTrue(fs.exists(regionFs.getRegionDir())); @@ -351,7 +352,8 @@ public class TestHRegionFileSystem { // Create a Region String familyName = "cf"; - HRegionInfo hri = new HRegionInfo(TableName.valueOf(name.getMethodName())); + ; + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); // New region, no store files http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 1b5ee08..a20fccb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -81,9 +81,9 @@ public class TestHRegionInfo { long modtime2 = getModTime(r); assertEquals(modtime, modtime2); // Now load the file. 
- HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent( + org.apache.hadoop.hbase.client.RegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent( r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir()); - assertTrue(hri.equals(deserializedHri)); + assertTrue(org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri) == 0); HBaseTestingUtility.closeRegionAndWAL(r); } @@ -300,12 +300,12 @@ public class TestHRegionInfo { String firstPart = descriptiveNameForDisplay.substring(0, descriptiveNameForDisplay.indexOf(new String(HRegionInfo.HIDDEN_START_KEY))); String secondPart = descriptiveNameForDisplay.substring( - descriptiveNameForDisplay.indexOf(new String(HRegionInfo.HIDDEN_START_KEY)) + + descriptiveNameForDisplay.indexOf(new String(HRegionInfo.HIDDEN_START_KEY)) + HRegionInfo.HIDDEN_START_KEY.length); String firstPartOrig = origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey))); String secondPartOrig = origDesc.substring( - origDesc.indexOf(Bytes.toStringBinary(startKey)) + + origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length()); assert(firstPart.equals(firstPartOrig)); assert(secondPart.equals(secondPartOrig)); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java index 9305bbc..59e4ded 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; 
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.client.ConnectionConfiguration; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -407,7 +407,7 @@ public class TestMobStoreScanner { // Get the archive path Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration()); Path tableDir = FSUtils.getTableDir(rootDir, tn); - HRegionInfo regionInfo = MobUtils.getMobRegionInfo(tn); + RegionInfo regionInfo = MobUtils.getMobRegionInfo(tn); Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), regionInfo, tableDir, family); http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java index 3cba15c..edd5673 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java @@ -34,16 +34,14 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import 
org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.LocalHBaseCluster; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.ServerListener; import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -55,6 +53,8 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.junit.rules.TestRule; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; + /** * Tests that a regionserver that dies after reporting for duty gets removed * from list of online regions. See HBASE-9593. @@ -124,12 +124,12 @@ public class TestRSKilledWhenInitializing { // showing still. The downed RegionServer should still be showing as registered. assertTrue(master.getMaster().getServerManager().isServerOnline(killedRS.get())); // Find non-meta region (namespace?) and assign to the killed server. That'll trigger cleanup. 
- Map<HRegionInfo, ServerName> assignments = null; + Map<RegionInfo, ServerName> assignments = null; do { assignments = master.getMaster().getAssignmentManager().getRegionStates().getRegionAssignments(); } while (assignments == null || assignments.size() < 2); - HRegionInfo hri = null; - for (Map.Entry<HRegionInfo, ServerName> e: assignments.entrySet()) { + RegionInfo hri = null; + for (Map.Entry<RegionInfo, ServerName> e: assignments.entrySet()) { if (e.getKey().isMetaRegion()) continue; hri = e.getKey(); break; http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index 3a404e9..cfecd9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -22,15 +22,19 @@ import java.io.IOException; import java.io.StringWriter; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import 
org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.ipc.MetricsHBaseServer; +import org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapperStub; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; @@ -45,16 +49,12 @@ import org.junit.rules.TestName; import org.mockito.Mockito; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; - import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.ipc.MetricsHBaseServer; -import org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapperStub; -import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; /** * Tests for the region server status page and its template. 
@@ -101,7 +101,7 @@ public class TestRSStatusServlet { CacheConfig cacheConf = Mockito.mock(CacheConfig.class); Mockito.doReturn(null).when(cacheConf).getBlockCache(); Mockito.doReturn(cacheConf).when(rs).getCacheConfig(); - + // Fake MasterAddressTracker MasterAddressTracker mat = Mockito.mock(MasterAddressTracker.class); Mockito.doReturn(fakeMasterAddress).when(mat).getMasterAddress(); @@ -115,23 +115,28 @@ public class TestRSStatusServlet { Mockito.doReturn(new MetricsHBaseServerWrapperStub()).when(ms).getHBaseServerWrapper(); Mockito.doReturn(ms).when(rpcServer).getMetrics(); } - + @Test public void testBasic() throws IOException, ServiceException { new RSStatusTmpl().render(new StringWriter(), rs); } - + @Test public void testWithRegions() throws IOException, ServiceException { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); - List<HRegionInfo> regions = Lists.newArrayList( - new HRegionInfo(htd.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("d")), - new HRegionInfo(htd.getTableName(), Bytes.toBytes("d"), Bytes.toBytes("z")) + List<RegionInfo> regions = Lists.newArrayList( + RegionInfoBuilder.newBuilder(htd.getTableName()) + .setStartKey(Bytes.toBytes("a")) + .setEndKey(Bytes.toBytes("d")) + .build(), + RegionInfoBuilder.newBuilder(htd.getTableName()) + .setStartKey(Bytes.toBytes("d")) + .setEndKey(Bytes.toBytes("z")) + .build() ); Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse( regions)).when(rpcServices).getOnlineRegion((RpcController)Mockito.any(), (GetOnlineRegionRequest)Mockito.any()); - new RSStatusTmpl().render(new StringWriter(), rs); } }