hbase-commits mailing list archives

From apurt...@apache.org
Subject [09/16] hbase git commit: HBASE-12972 Region, a supportable public/evolving subset of HRegion
Date Tue, 31 Mar 2015 01:40:28 GMT
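
Every hunk in this message exercises the same migration: test code that previously reached into HRegion now goes through the org.apache.hadoop.hbase.regionserver.Region interface introduced by HBASE-12972. A condensed sketch of the recurring pattern, illustrative only and not part of the commit (it assumes a MiniHBaseCluster `cluster` and a TableName `tableName` from the surrounding test):

    // Region name is read via HRegionInfo rather than the removed HRegion.getRegionName():
    List<HRegion> regions = cluster.getRegions(tableName);
    int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
    // Online-region lookups are now typed against the Region interface:
    List<Region> onlineRegions = regionServer.getOnlineRegions(tableName);
    for (Region r : onlineRegions) {
      r.flush(true);  // was r.flushcache()
    }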
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 5a32295..429f494 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -204,7 +204,8 @@ public class TestSplitTransactionOnCluster {
       HTable t = createTableAndWait(tableName, Bytes.toBytes("cf"));
       final List<HRegion> regions = cluster.getRegions(tableName);
       HRegionInfo hri = getAndCheckSingleTableRegion(regions);
-      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
+      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo()
+        .getRegionName());
       final HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
       insertData(tableName, admin, t);
       t.close();
@@ -264,7 +265,7 @@ public class TestSplitTransactionOnCluster {
       assertFalse("region still in transition", rit.containsKey(
           rit.containsKey(hri.getTable())));
 
-      List<HRegion> onlineRegions = regionServer.getOnlineRegions(tableName);
+      List<Region> onlineRegions = regionServer.getOnlineRegions(tableName);
       // Region server side split is successful.
       assertEquals("The parent region should be splitted", 2, onlineRegions.size());
       //Should be present in RIT
@@ -308,7 +309,7 @@ public class TestSplitTransactionOnCluster {
         Coprocessor.PRIORITY_USER, region.getBaseConf());
 
       // split async
-      this.admin.split(region.getRegionName(), new byte[] {42});
+      this.admin.split(region.getRegionInfo().getRegionName(), new byte[] {42});
 
       // we have to wait until the SPLITTING state is seen by the master
       FailingSplitRegionObserver observer = (FailingSplitRegionObserver) region
@@ -350,7 +351,7 @@ public class TestSplitTransactionOnCluster {
 
       HRegion region = cluster.getRegions(tableName).get(0);
       Store store = region.getStore(cf);
-      int regionServerIndex = cluster.getServerWith(region.getRegionName());
+      int regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
 
       Table t = new HTable(conf, tableName);
@@ -931,7 +932,8 @@ public class TestSplitTransactionOnCluster {
     List<HRegion> regions = null;
     try {
       regions = cluster.getRegions(tableName);
-      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
+      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo()
+        .getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
       insertData(tableName, admin, t);
       // Turn off balancer so it doesn't cut in and mess up our placements.
@@ -989,7 +991,8 @@ public class TestSplitTransactionOnCluster {
     } while (oldRegions.size() != 2);
     for (HRegion h : oldRegions) LOG.debug("OLDREGION " + h.getRegionInfo());
     try {
-      int regionServerIndex = cluster.getServerWith(oldRegions.get(0).getRegionName());
+      int regionServerIndex = cluster.getServerWith(oldRegions.get(0).getRegionInfo()
+        .getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
       insertData(tableName, admin, t);
       // Turn off balancer so it doesn't cut in and mess up our placements.
@@ -1000,7 +1003,7 @@ public class TestSplitTransactionOnCluster {
           tableName);
       assertEquals("The specified table should be present.", true, tableExists);
       final HRegion region = findSplittableRegion(oldRegions);
-      regionServerIndex = cluster.getServerWith(region.getRegionName());
+      regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName());
       regionServer = cluster.getRegionServer(regionServerIndex);
       assertTrue("not able to find a splittable region", region != null);
       String node = ZKAssign.getNodeName(regionServer.getZooKeeper(),
@@ -1082,7 +1085,8 @@ public class TestSplitTransactionOnCluster {
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
     ensureTableRegionNotOnSameServerAsMeta(admin, hri);
-    int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
+    int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo()
+      .getRegionName());
     HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
     // Turn off balancer so it doesn't cut in and mess up our placements.
     this.admin.setBalancerRunning(false, true);
@@ -1176,8 +1180,8 @@ public class TestSplitTransactionOnCluster {
     if (firstTableRegions.size() == 0 || secondTableRegions.size() == 0) {
       fail("Each table should have at least one region.");
     }
-    ServerName serverName =
-        cluster.getServerHoldingRegion(firstTable, firstTableRegions.get(0).getRegionName());
+    ServerName serverName = cluster.getServerHoldingRegion(firstTable,
+      firstTableRegions.get(0).getRegionInfo().getRegionName());
     admin.move(secondTableRegions.get(0).getRegionInfo().getEncodedNameAsBytes(),
       Bytes.toBytes(serverName.getServerName()));
     Table table1 = null;
@@ -1217,13 +1221,14 @@ public class TestSplitTransactionOnCluster {
       List<HRegion> regions = awaitTableRegions(tableName);
       assertTrue("Table not online", cluster.getRegions(tableName).size() != 0);
 
-      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
+      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo()
+        .getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
       final HRegion region = findSplittableRegion(regions);
       assertTrue("not able to find a splittable region", region != null);
       SplitTransaction st = new MockedSplitTransaction(region, Bytes.toBytes("row2")) {
         @Override
-        public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
+        public PairOfSameType<Region> stepsBeforePONR(final Server server,
             final RegionServerServices services, boolean testing) throws IOException {
           throw new SplittingNodeCreationFailedException ();
         }
@@ -1278,7 +1283,7 @@ public class TestSplitTransactionOnCluster {
         p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
         region.put(p);
       }
-      region.flushcache();
+      region.flush(true);
       Store store = region.getStore(Bytes.toBytes("f"));
       Collection<StoreFile> storefiles = store.getStorefiles();
       assertEquals(storefiles.size(), 1);
@@ -1347,9 +1352,9 @@ public class TestSplitTransactionOnCluster {
       }
       admin.flush(desc.getTableName());
       List<HRegion> regions = cluster.getRegions(desc.getTableName());
-      int serverWith = cluster.getServerWith(regions.get(0).getRegionName());
+      int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(serverWith);
-      cluster.getServerWith(regions.get(0).getRegionName());
+      cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
       SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3"));
       st.prepare();
       st.stepsBeforePONR(regionServer, regionServer, false);
@@ -1426,8 +1431,8 @@ public class TestSplitTransactionOnCluster {
     }
 
     @Override
-    public void completeSplitTransaction(RegionServerServices services, HRegion a, HRegion b,
-        SplitTransactionDetails std, HRegion parent) throws IOException {
+    public void completeSplitTransaction(RegionServerServices services, Region a, Region b,
+        SplitTransactionDetails std, Region parent) throws IOException {
       if (this.currentRegion.getRegionInfo().getTable().getNameAsString()
           .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) {
         try {
@@ -1643,17 +1648,17 @@ public class TestSplitTransactionOnCluster {
 
   public static class MockedRegionObserver extends BaseRegionObserver {
     private SplitTransaction st = null;
-    private PairOfSameType<HRegion> daughterRegions = null;
+    private PairOfSameType<Region> daughterRegions = null;
 
     @Override
     public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
         byte[] splitKey, List<Mutation> metaEntries) throws IOException {
       RegionCoprocessorEnvironment environment = ctx.getEnvironment();
       HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
-      List<HRegion> onlineRegions =
+      List<Region> onlineRegions =
           rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"));
-      HRegion region = onlineRegions.get(0);
-      for (HRegion r : onlineRegions) {
+      Region region = onlineRegions.get(0);
+      for (Region r : onlineRegions) {
         if (r.getRegionInfo().containsRow(splitKey)) {
           region = r;
           break;
@@ -1666,7 +1671,7 @@ public class TestSplitTransactionOnCluster {
         ctx.bypass();
         return;
       }
-      region.forceSplit(splitKey);
+      ((HRegion)region).forceSplit(splitKey);
       daughterRegions = st.stepsBeforePONR(rs, rs, false);
       HRegionInfo copyOfParent = new HRegionInfo(region.getRegionInfo());
       copyOfParent.setOffline(true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
index 67c8adb..dfffbd4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
@@ -93,13 +93,15 @@ public class TestStoreFileRefresherChore {
     }
   }
 
-  private HRegion initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId) throws IOException {
+  private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
+      throws IOException {
     Configuration conf = TEST_UTIL.getConfiguration();
     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
 
     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);
 
-    HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
+    HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir,
+      info);
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, tableDir);
     final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
@@ -110,7 +112,8 @@ public class TestStoreFileRefresherChore {
     return region;
   }
 
-  private void putData(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families) throws IOException {
+  private void putData(Region region, int startRow, int numRows, byte[] qf, byte[]... families)
+      throws IOException {
     for (int i = startRow; i < startRow + numRows; i++) {
       Put put = new Put(Bytes.toBytes("" + i));
       put.setDurability(Durability.SKIP_WAL);
@@ -121,7 +124,7 @@ public class TestStoreFileRefresherChore {
     }
   }
 
-  private void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... families)
+  private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
       throws IOException {
     for (int i = startRow; i < startRow + numRows; i++) {
       byte[] row = Bytes.toBytes("" + i);
@@ -159,13 +162,13 @@ public class TestStoreFileRefresherChore {
     byte[] qf = Bytes.toBytes("cq");
 
     HRegionServer regionServer = mock(HRegionServer.class);
-    List<HRegion> regions = new ArrayList<HRegion>();
+    List<Region> regions = new ArrayList<Region>();
     when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions);
     when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
 
     HTableDescriptor htd = getTableDesc(TableName.valueOf("testIsStale"), families);
-    HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
-    HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
+    Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
+    Region replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
     regions.add(primary);
     regions.add(replica1);
 
@@ -173,7 +176,7 @@ public class TestStoreFileRefresherChore {
 
     // write some data to primary and flush
     putData(primary, 0, 100, qf, families);
-    primary.flushcache();
+    primary.flush(true);
     verifyData(primary, 0, 100, qf, families);
 
     try {
@@ -186,11 +189,11 @@ public class TestStoreFileRefresherChore {
     verifyData(replica1, 0, 100, qf, families);
 
     // simulate an fs failure where we cannot refresh the store files for the replica
-    ((FailingHRegionFileSystem)replica1.getRegionFileSystem()).fail = true;
+    ((FailingHRegionFileSystem)((HRegion)replica1).getRegionFileSystem()).fail = true;
 
     // write some more data to primary and flush
     putData(primary, 100, 100, qf, families);
-    primary.flushcache();
+    primary.flush(true);
     verifyData(primary, 0, 200, qf, families);
 
     chore.chore(); // should not throw ex, but we cannot refresh the store files

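As a reference sketch (not part of the commit), the two patterns this file settles on: Region.flush(boolean) replaces HRegion.flushcache(), and implementation-only accessors such as getRegionFileSystem() require casting back to HRegion. Here `primary`, `replica1`, `putData`, `qf` and `families` are the test's own fixtures:

    // Write through the Region-typed reference, then flush it:
    putData(primary, 0, 100, qf, families);
    primary.flush(true);  // was primary.flushcache()
    // Internal accessors stay on the HRegion implementation, hence the cast:
    HRegionFileSystem fs = ((HRegion) replica1).getRegionFileSystem();
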
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
index e55707e..0b410fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
@@ -72,8 +73,8 @@ public class TestCompactionWithThroughputController {
     List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
     for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
       HRegionServer hrs = rsts.get(i).getRegionServer();
-      for (HRegion region : hrs.getOnlineRegions(tableName)) {
-        return region.getStores().values().iterator().next();
+      for (Region region : hrs.getOnlineRegions(tableName)) {
+        return region.getStores().iterator().next();
       }
     }
     return null;
@@ -119,8 +120,6 @@ public class TestCompactionWithThroughputController {
       assertEquals(10, store.getStorefilesCount());
       long startTime = System.currentTimeMillis();
       TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
-      Thread.sleep(5000);
-      assertEquals(10, store.getStorefilesCount());
       while (store.getStorefilesCount() != 1) {
         Thread.sleep(20);
       }

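Illustrative only: a hypothetical helper (name `firstStore` is made up here) showing how the hunk above now reaches a store through Region.getStores(), which returns the stores directly instead of the old map whose values() had to be taken:

    private static Store firstStore(MiniHBaseCluster cluster, TableName tableName) {
      for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
        for (Region region : t.getRegionServer().getOnlineRegions(tableName)) {
          // Region.getStores() is iterated directly; no .values() indirection.
          return region.getStores().iterator().next();
        }
      }
      return null;
    }
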
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
index 8c152fd..d204ee0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
@@ -450,7 +450,7 @@ public class TestFSHLog {
             System.currentTimeMillis(), clusterIds, -1, -1);
         wal.append(htd, info, logkey, edits, region.getSequenceId(), true, null);
       }
-      region.flushcache();
+      region.flush(true);
       // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
       long currentSequenceId = region.getSequenceId().get();
       // Now release the appends

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index 2e18fba..caf05a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -54,8 +54,8 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -222,11 +222,8 @@ public class TestLogRolling  {
         " log files");
 
       // flush all regions
-
-      List<HRegion> regions =
-        new ArrayList<HRegion>(server.getOnlineRegionsLocalContext());
-      for (HRegion r: regions) {
-        r.flushcache();
+      for (Region r: server.getOnlineRegionsLocalContext()) {
+        r.flush(true);
       }
 
       // Now roll the log
@@ -529,9 +526,8 @@ public class TestLogRolling  {
       assertTrue(loggedRows.contains("row1005"));
 
       // flush all regions
-      List<HRegion> regions = new ArrayList<HRegion>(server.getOnlineRegionsLocalContext());
-      for (HRegion r: regions) {
-        r.flushcache();
+      for (Region r: server.getOnlineRegionsLocalContext()) {
+        r.flush(true);
       }
 
       ResultScanner scanner = table.getScanner(new Scan());
@@ -573,7 +569,7 @@ public class TestLogRolling  {
 
       server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
       final WAL log = server.getWAL(null);
-      HRegion region = server.getOnlineRegions(table2.getName()).get(0);
+      Region region = server.getOnlineRegions(table2.getName()).get(0);
       Store s = region.getStore(HConstants.CATALOG_FAMILY);
 
       //have to flush namespace to ensure it doesn't affect wall tests
@@ -594,7 +590,7 @@ public class TestLogRolling  {
       assertEquals("Should have WAL; one table is not flushed", 1,
           DefaultWALProvider.getNumRolledLogFiles(log));
       admin.flush(table2.getName());
-      region.compactStores();
+      region.compact(false);
       // Wait for compaction in case if flush triggered it before us.
       Assert.assertNotNull(s);
       for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {

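For reference, a sketch (not part of the commit, reusing only calls that appear in the hunks above) of the store-level checks now expressed against the Region interface; `server` and `table2` are the test's fixtures:

    Region region = server.getOnlineRegions(table2.getName()).get(0);
    Store s = region.getStore(HConstants.CATALOG_FAMILY);
    // compact(false) requests an ordinary compaction where compactStores() was used before;
    // compact(true), seen in other hunks, asks for a major compaction.
    region.compact(false);
    int storefiles = s.getStorefilesCount();
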
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index fe2a37d..dbd2a4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -203,9 +204,9 @@ public class TestWALReplay {
     assertEquals(1, regions.size());
 
     // move region to another regionserver
-    HRegion destRegion = regions.get(0);
+    Region destRegion = regions.get(0);
     int originServerNum = hbaseCluster
-        .getServerWith(destRegion.getRegionName());
+        .getServerWith(destRegion.getRegionInfo().getRegionName());
     assertTrue("Please start more than 1 regionserver", hbaseCluster
         .getRegionServerThreads().size() > 1);
     int destServerNum = 0;
@@ -229,13 +230,13 @@ public class TestWALReplay {
     assertEquals(0, count);
 
     // flush region and make major compaction
-    destServer.getOnlineRegion(destRegion.getRegionName()).flushcache();
+    Region region =  destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
+    region.flush(true);
     // wait to complete major compaction
-    for (Store store : destServer.getOnlineRegion(destRegion.getRegionName())
-        .getStores().values()) {
+    for (Store store : region.getStores()) {
       store.triggerMajorCompaction();
     }
-    destServer.getOnlineRegion(destRegion.getRegionName()).compactStores();
+    region.compact(true);
 
     // move region to origin regionserver
     moveRegionAndWait(destRegion, originServer);
@@ -251,7 +252,7 @@ public class TestWALReplay {
     resultScanner.close();
   }
 
-  private void moveRegionAndWait(HRegion destRegion, HRegionServer destServer)
+  private void moveRegionAndWait(Region destRegion, HRegionServer destServer)
       throws InterruptedException, MasterNotRunningException,
       ZooKeeperConnectionException, IOException {
     HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
@@ -286,8 +287,7 @@ public class TestWALReplay {
     deleteDir(basedir);
 
     HTableDescriptor htd = createBasic3FamilyHTD(tableName);
-    HRegion region2 = HRegion.createHRegion(hri,
-        hbaseRootDir, this.conf, htd);
+    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
     HRegion.closeHRegion(region2);
     final byte [] rowName = tableName.getName();
 
@@ -348,8 +348,7 @@ public class TestWALReplay {
     final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
     deleteDir(basedir);
     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
-    HRegion region2 = HRegion.createHRegion(hri,
-        hbaseRootDir, this.conf, htd);
+    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
     HRegion.closeHRegion(region2);
     WAL wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
@@ -360,7 +359,7 @@ public class TestWALReplay {
         Bytes.toBytes("z"), 10);
     List <Pair<byte[],String>>  hfs= new ArrayList<Pair<byte[],String>>(1);
     hfs.add(Pair.newPair(family, f.toString()));
-    region.bulkLoadHFiles(hfs, true);
+    region.bulkLoadHFiles(hfs, true, null);
 
     // Add an edit so something in the WAL
     byte [] row = tableName.getName();
@@ -434,12 +433,12 @@ public class TestWALReplay {
           Bytes.toBytes(i + "50"), 10);
       hfs.add(Pair.newPair(family, f.toString()));
     }
-    region.bulkLoadHFiles(hfs, true);
+    region.bulkLoadHFiles(hfs, true, null);
     final int rowsInsertedCount = 31;
     assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
 
     // major compact to turn all the bulk loaded files into one normal file
-    region.compactStores(true);
+    region.compact(true);
     assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
 
     // Now 'crash' the region by stealing its wal
@@ -502,7 +501,7 @@ public class TestWALReplay {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
       if (first ) {
         // If first, so we have at least one family w/ different seqid to rest.
-        region.flushcache();
+        region.flush(true);
         first = false;
       }
     }
@@ -618,7 +617,7 @@ public class TestWALReplay {
       result.size());
 
     // Let us flush the region
-    region.flushcache();
+    region.flush(true);
     region.close(true);
     wal.shutdown();
 
@@ -713,7 +712,7 @@ public class TestWALReplay {
     // Let us flush the region
     CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
     try {
-      region.flushcache();
+      region.flush(true);
       fail("Injected exception hasn't been thrown");
     } catch (Throwable t) {
       LOG.info("Expected simulated exception when flushing region,"
@@ -733,7 +732,7 @@ public class TestWALReplay {
     // call flush again
     CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
     try {
-      region.flushcache();
+      region.flush(true);
     } catch (IOException t) {
       LOG.info("Expected exception when flushing region because server is stopped,"
           + t.getMessage());
@@ -890,7 +889,7 @@ public class TestWALReplay {
 
     // Let us flush the region
     // But this time completeflushcache is not yet done
-    region.flushcache();
+    region.flush(true);
     for (HColumnDescriptor hcd : htd.getFamilies()) {
       addRegionEdits(rowName, hcd.getName(), 5, this.ee, region, "x");
     }
@@ -965,16 +964,16 @@ public class TestWALReplay {
     private HRegion r;
 
     @Override
-    public void requestFlush(HRegion region, boolean forceFlushAllStores) {
+    public void requestFlush(Region region, boolean force) {
       try {
-        r.flushcache(forceFlushAllStores);
+        r.flush(force);
       } catch (IOException e) {
         throw new RuntimeException("Exception flushing", e);
       }
     }
 
     @Override
-    public void requestDelayedFlush(HRegion region, long when, boolean forceFlushAllStores) {
+    public void requestDelayedFlush(Region region, long when, boolean forceFlushAllStores) {
       // TODO Auto-generated method stub
 
     }
@@ -1013,7 +1012,7 @@ public class TestWALReplay {
   }
 
   static List<Put> addRegionEdits (final byte [] rowName, final byte [] family,
-      final int count, EnvironmentEdge ee, final HRegion r,
+      final int count, EnvironmentEdge ee, final Region r,
       final String qualifierPrefix)
   throws IOException {
     List<Put> puts = new ArrayList<Put>();

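A minimal sketch (hypothetical fragment, built only from calls shown in the hunks above) of the flush-then-major-compact sequence as it reads through Region; `destServer` and `destRegion` come from the test:

    Region region = destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
    region.flush(true);
    for (Store store : region.getStores()) {
      store.triggerMajorCompaction();
    }
    region.compact(true);  // major compaction, replacing the old compactStores() call
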
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
index c2bb2ce..ae38ec6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
@@ -224,7 +224,7 @@ public class TestMultiSlaveReplication {
 
     // request a roll
     admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(),
-      region.getRegionName()));
+      region.getRegionInfo().getRegionName()));
 
     // wait
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
index 7acc859..bdda4cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
@@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -210,22 +210,22 @@ public class TestRegionReplicaReplicationEndpoint {
   private void verifyReplication(TableName tableName, int regionReplication,
       final int startRow, final int endRow, final boolean present) throws Exception {
     // find the regions
-    final HRegion[] regions = new HRegion[regionReplication];
+    final Region[] regions = new Region[regionReplication];
 
     for (int i=0; i < NB_SERVERS; i++) {
       HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(i);
-      List<HRegion> onlineRegions = rs.getOnlineRegions(tableName);
-      for (HRegion region : onlineRegions) {
+      List<Region> onlineRegions = rs.getOnlineRegions(tableName);
+      for (Region region : onlineRegions) {
         regions[region.getRegionInfo().getReplicaId()] = region;
       }
     }
 
-    for (HRegion region : regions) {
+    for (Region region : regions) {
       assertNotNull(region);
     }
 
     for (int i = 1; i < regionReplication; i++) {
-      final HRegion region = regions[i];
+      final Region region = regions[i];
       // wait until all the data is replicated to all secondary regions
       Waiter.waitFor(HTU.getConfiguration(), 90000, new Waiter.Predicate<Exception>() {
         @Override
@@ -234,8 +234,7 @@ public class TestRegionReplicaReplicationEndpoint {
           try {
             HTU.verifyNumericRows(region, HBaseTestingUtility.fam1, startRow, endRow, present);
           } catch(Throwable ex) {
-            LOG.warn("Verification from secondary region is not complete yet. Got:" + ex
-              + " " + ex.getMessage());
+            LOG.warn("Verification from secondary region is not complete yet", ex);
             // still wait
             return false;
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index 5f2c737..0a82161 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -178,7 +179,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     // replay the edits to the secondary using replay callable
     replicateUsingCallable(connection, entries);
 
-    HRegion region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
+    Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.deleteNumericRows(table, f, 0, 1000);
@@ -218,7 +219,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     // replay the edits to the secondary using replay callable
     replicateUsingCallable(connection, entries);
 
-    HRegion region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
+    Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
@@ -261,7 +262,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     // replay the edits to the secondary using replay callable
     replicator.replicate(new ReplicateContext().setEntries(Lists.newArrayList(entries)));
 
-    HRegion region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
+    Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.deleteNumericRows(table, f, 0, 1000);

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index 6487ebe..89f10db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -288,7 +288,7 @@ public class SecureTestUtil {
   private static List<AccessController> getAccessControllers(MiniHBaseCluster cluster) {
     List<AccessController> result = Lists.newArrayList();
     for (RegionServerThread t: cluster.getLiveRegionServerThreads()) {
-      for (HRegion region: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for (Region region: t.getRegionServer().getOnlineRegionsLocalContext()) {
         Coprocessor cp = region.getCoprocessorHost()
           .findCoprocessor(AccessController.class.getName());
         if (cp != null) {
@@ -323,7 +323,7 @@ public class SecureTestUtil {
         for (Map.Entry<AccessController,Long> e: mtimes.entrySet()) {
           if (!oldMTimes.containsKey(e.getKey())) {
             LOG.error("Snapshot of AccessController state does not include instance on region " +
-              e.getKey().getRegion().getRegionNameAsString());
+              e.getKey().getRegion().getRegionInfo().getRegionNameAsString());
             // Error out the predicate, we will try again
             return false;
           }
@@ -331,8 +331,8 @@ public class SecureTestUtil {
           long now = e.getValue();
           if (now <= old) {
             LOG.info("AccessController on region " +
-              e.getKey().getRegion().getRegionNameAsString() + " has not updated: mtime=" +
-              now);
+              e.getKey().getRegion().getRegionInfo().getRegionNameAsString() +
+              " has not updated: mtime=" + now);
             return false;
           }
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index d8f4d2d..de452f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessCont
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.ScanType;
@@ -235,7 +236,7 @@ public class TestAccessController extends SecureTestUtil {
     admin.createTable(htd, new byte[][] { Bytes.toBytes("s") });
     TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE.getTableName());
 
-    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0);
+    Region region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0);
     RegionCoprocessorHost rcpHost = region.getCoprocessorHost();
     RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
       Coprocessor.PRIORITY_HIGHEST, 1, conf);
@@ -2198,7 +2199,7 @@ public class TestAccessController extends SecureTestUtil {
     for (JVMClusterUtil.RegionServerThread thread:
         TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
       HRegionServer rs = thread.getRegionServer();
-      for (HRegion region: rs.getOnlineRegions(TEST_TABLE.getTableName())) {
+      for (Region region: rs.getOnlineRegions(TEST_TABLE.getTableName())) {
         region.getCoprocessorHost().load(PingCoprocessor.class,
           Coprocessor.PRIORITY_USER, conf);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
index be5588d..cbbacf3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode;
@@ -79,7 +79,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
   private final ExpressionParser expressionParser = new ExpressionParser();
   private final ExpressionExpander expressionExpander = new ExpressionExpander();
   private Configuration conf;
-  private HRegion labelsRegion;
+  private Region labelsRegion;
   private List<ScanLabelGenerator> scanLabelGenerators;
   private List<String> superUsers;
   private List<String> superGroups;

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
index 0e2540a..1f9f79e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
@@ -56,8 +56,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResul
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -294,7 +294,7 @@ public abstract class TestVisibilityLabels {
         List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster()
             .getRegionServerThreads();
         for (RegionServerThread rsThread : regionServerThreads) {
-          List<HRegion> onlineRegions = rsThread.getRegionServer().getOnlineRegions(
+          List<Region> onlineRegions = rsThread.getRegionServer().getOnlineRegions(
               LABELS_TABLE_NAME);
           if (onlineRegions.size() > 0) {
             rsThread.getRegionServer().abort("Aborting ");
@@ -328,7 +328,7 @@ public abstract class TestVisibilityLabels {
     for (RegionServerThread rsThread : regionServerThreads) {
       while (true) {
         if (!rsThread.getRegionServer().isAborted()) {
-          List<HRegion> onlineRegions = rsThread.getRegionServer().getOnlineRegions(
+          List<Region> onlineRegions = rsThread.getRegionServer().getOnlineRegions(
               LABELS_TABLE_NAME);
           if (onlineRegions.size() > 0) {
             break;
@@ -385,7 +385,7 @@ public abstract class TestVisibilityLabels {
       } catch (InterruptedException e) {
       }
     }
-    HRegion labelsTableRegion = regionServer.getOnlineRegions(LABELS_TABLE_NAME).get(0);
+    Region labelsTableRegion = regionServer.getOnlineRegions(LABELS_TABLE_NAME).get(0);
     while (labelsTableRegion.isRecovering()) {
       try {
         Thread.sleep(10);

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index c8abff0..7f04eb3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -61,9 +61,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSVisitor;
@@ -627,8 +627,8 @@ public class SnapshotTestingUtils {
                                             final TableName tableName)
       throws IOException, InterruptedException {
     HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
-    List<HRegion> onlineRegions = rs.getOnlineRegions(tableName);
-    for (HRegion region : onlineRegions) {
+    List<Region> onlineRegions = rs.getOnlineRegions(tableName);
+    for (Region region : onlineRegions) {
       region.waitForFlushesAndCompactions();
     }
     // Wait up to 60 seconds for a table to be available.

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index c616b62..da39ec1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -2690,9 +2690,9 @@ public class TestHBaseFsck {
       }
       admin.flush(desc.getTableName());
       List<HRegion> regions = cluster.getRegions(desc.getTableName());
-      int serverWith = cluster.getServerWith(regions.get(0).getRegionName());
+      int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(serverWith);
-      cluster.getServerWith(regions.get(0).getRegionName());
+      cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
       SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3"));
       st.prepare();
       st.stepsBeforePONR(regionServer, regionServer, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
index 3332c0f..67a275e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
@@ -44,14 +44,13 @@ import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.io.crypto.aes.AES;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -141,9 +140,9 @@ public class TestHBaseFsckEncryption {
 
   private List<Path> findStorefilePaths(TableName tableName) throws Exception {
     List<Path> paths = new ArrayList<Path>();
-    for (HRegion region:
+    for (Region region:
         TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) {
-      for (Store store: region.getStores().values()) {
+      for (Store store: region.getStores()) {
         for (StoreFile storefile: store.getStorefiles()) {
           paths.add(storefile.getPath());
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
index 04fa5bf..1639467 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
@@ -143,7 +143,7 @@ public class TestMergeTable {
   throws IOException {
     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
     HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
-    LOG.info("Created region " + region.getRegionNameAsString());
+    LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
     for(int i = firstRow; i < firstRow + nrows; i++) {
       Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
       put.setDurability(Durability.SKIP_WAL);
@@ -151,7 +151,7 @@ public class TestMergeTable {
       region.put(put);
       if (i % 10000 == 0) {
         LOG.info("Flushing write #" + i);
-        region.flushcache();
+        region.flush(true);
       }
     }
     HRegion.closeHRegion(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java
index 139e143..a736c71 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java
@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
@@ -129,8 +129,8 @@ public class TestWALFiltering {
   private List<byte[]> getRegionsByServer(int rsId) throws IOException {
     List<byte[]> regionNames = Lists.newArrayList();
     HRegionServer hrs = getRegionServer(rsId);
-    for (HRegion r : hrs.getOnlineRegions(TABLE_NAME)) {
-      regionNames.add(r.getRegionName());
+    for (Region r : hrs.getOnlineRegions(TABLE_NAME)) {
+      regionNames.add(r.getRegionInfo().getRegionName());
     }
     return regionNames;
   }

