hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From apurt...@apache.org
Subject [04/16] hbase git commit: HBASE-12972 Region, a supportable public/evolving subset of HRegion
Date Tue, 31 Mar 2015 01:40:23 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 6002f29..4f96585 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -173,11 +173,11 @@ public class Merge extends Configured implements Tool {
   throws IOException {
     if (info1 == null) {
       throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
-          Bytes.toStringBinary(meta.getRegionName()));
+          Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
     }
     if (info2 == null) {
       throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
-          Bytes.toStringBinary(meta.getRegionName()));
+          Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
     }
     HRegion merged = null;
     HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index 8ea6178..7d1ff0d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -231,7 +232,7 @@ public abstract class HBaseTestCase extends TestCase {
    * @throws IOException
    * @return count of what we added.
    */
-  public static long addContent(final HRegion r, final byte [] columnFamily, final byte[] column)
+  public static long addContent(final Region r, final byte [] columnFamily, final byte[] column)
   throws IOException {
     byte [] startKey = r.getRegionInfo().getStartKey();
     byte [] endKey = r.getRegionInfo().getEndKey();
@@ -243,8 +244,7 @@ public abstract class HBaseTestCase extends TestCase {
       startKeyBytes, endKey, -1);
   }
 
-  public static long addContent(final HRegion r, final byte [] columnFamily)
-  throws IOException {
+  public static long addContent(final Region r, final byte [] columnFamily) throws IOException {
     return addContent(r, columnFamily, null);
   }
 
@@ -440,6 +440,10 @@ public abstract class HBaseTestCase extends TestCase {
       this.region = HRegion;
     }
 
+    public HRegionIncommon(final Region region) {
+      this.region = (HRegion)region;
+    }
+
     public void put(Put put) throws IOException {
       region.put(put);
     }
@@ -470,7 +474,7 @@ public abstract class HBaseTestCase extends TestCase {
       }
 
     public void flushcache() throws IOException {
-      this.region.flushcache();
+      this.region.flush(true);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 0445cb0..ff79569 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -90,6 +90,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
@@ -287,6 +288,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   }
 
   /**
+   * Close both the region {@code r} and its underlying WAL. For use in tests.
+   */
+  public static void closeRegionAndWAL(final Region r) throws IOException {
+    closeRegionAndWAL((HRegion)r);
+  }
+
+  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
    */
   public static void closeRegionAndWAL(final HRegion r) throws IOException {
@@ -2131,6 +2139,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return loadRegion(r, f, false);
   }
 
+  public int loadRegion(final Region r, final byte[] f) throws IOException {
+    return loadRegion((HRegion)r, f);
+  }
+
   /**
    * Load region with rows from 'aaa' to 'zzz'.
    * @param r Region
@@ -2152,8 +2164,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
           Put put = new Put(k);
           put.setDurability(Durability.SKIP_WAL);
           put.add(f, null, k);
-          if (r.getWAL() == null) put.setDurability(Durability.SKIP_WAL);
-
+          if (r.getWAL() == null) {
+            put.setDurability(Durability.SKIP_WAL);
+          }
           int preRowCount = rowCount;
           int pause = 10;
           int maxPause = 1000;
@@ -2169,7 +2182,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
         }
       }
       if (flush) {
-        r.flushcache();
+        r.flush(true);
       }
     }
     return rowCount;
@@ -2204,11 +2217,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
   }
 
+  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
+      throws IOException {
+    verifyNumericRows((HRegion)region, f, startRow, endRow);
+  }
+
   public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
       throws IOException {
     verifyNumericRows(region, f, startRow, endRow, true);
   }
 
+  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
+      final boolean present) throws IOException {
+    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
+  }
+
   public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
       final boolean present) throws IOException {
     for (int i = startRow; i < endRow; i++) {
@@ -3755,10 +3778,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
           if (server.equals(rs.getServerName())) {
             continue;
           }
-          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
-          for (HRegion r: hrs) {
+          Collection<Region> hrs = rs.getOnlineRegionsLocalContext();
+          for (Region r: hrs) {
             assertTrue("Region should not be double assigned",
-              r.getRegionId() != hri.getRegionId());
+              r.getRegionInfo().getRegionId() != hri.getRegionId());
           }
         }
         return; // good, we are happy

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 24b6e71..4a02b04 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -535,8 +536,8 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void flushcache() throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
-        r.flushcache();
+      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+        r.flush(true);
       }
     }
   }
@@ -548,9 +549,9 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void flushcache(TableName tableName) throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
         if(r.getTableDesc().getTableName().equals(tableName)) {
-          r.flushcache();
+          r.flush(true);
         }
       }
     }
@@ -563,8 +564,8 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void compact(boolean major) throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
-        r.compactStores(major);
+      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+        r.compact(major);
       }
     }
   }
@@ -576,9 +577,9 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void compact(TableName tableName, boolean major) throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
         if(r.getTableDesc().getTableName().equals(tableName)) {
-          r.compactStores(major);
+          r.compact(major);
         }
       }
     }
@@ -615,9 +616,9 @@ public class MiniHBaseCluster extends HBaseCluster {
     List<HRegion> ret = new ArrayList<HRegion>();
     for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
-      for (HRegion region : hrs.getOnlineRegionsLocalContext()) {
+      for (Region region : hrs.getOnlineRegionsLocalContext()) {
         if (region.getTableDesc().getTableName().equals(tableName)) {
-          ret.add(region);
+          ret.add((HRegion)region);
         }
       }
     }
@@ -643,8 +644,7 @@ public class MiniHBaseCluster extends HBaseCluster {
     int count = 0;
     for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
-      HRegion metaRegion =
-        hrs.getOnlineRegion(regionName);
+      Region metaRegion = hrs.getOnlineRegion(regionName);
       if (metaRegion != null) {
         index = count;
         break;
@@ -662,7 +662,7 @@ public class MiniHBaseCluster extends HBaseCluster {
     // should hold some regions. Please refer to #countServedRegions
     // to see how we find out all regions.
     HMaster master = getMaster();
-    HRegion region = master.getOnlineRegion(regionName);
+    Region region = master.getOnlineRegion(regionName);
     if (region != null) {
       return master.getServerName();
     }
@@ -712,9 +712,9 @@ public class MiniHBaseCluster extends HBaseCluster {
     ArrayList<HRegion> ret = new ArrayList<HRegion>();
     for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
-      for (HRegion region : hrs.getOnlineRegions(tableName)) {
+      for (Region region : hrs.getOnlineRegions(tableName)) {
         if (region.getTableDesc().getTableName().equals(tableName)) {
-          ret.add(region);
+          ret.add((HRegion)region);
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 6d0a3c7..810ab90 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager;
 import org.apache.hadoop.hbase.regionserver.Leases;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.ServerNonceManager;
@@ -59,7 +59,7 @@ import com.google.protobuf.Service;
  */
 public class MockRegionServerServices implements RegionServerServices {
   protected static final Log LOG = LogFactory.getLog(MockRegionServerServices.class);
-  private final Map<String, HRegion> regions = new HashMap<String, HRegion>();
+  private final Map<String, Region> regions = new HashMap<String, Region>();
   private final ConcurrentSkipListMap<byte[], Boolean> rit =
     new ConcurrentSkipListMap<byte[], Boolean>(Bytes.BYTES_COMPARATOR);
   private HFileSystem hfs = null;
@@ -90,17 +90,17 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public boolean removeFromOnlineRegions(HRegion r, ServerName destination) {
+  public boolean removeFromOnlineRegions(Region r, ServerName destination) {
     return this.regions.remove(r.getRegionInfo().getEncodedName()) != null;
   }
 
   @Override
-  public HRegion getFromOnlineRegions(String encodedRegionName) {
+  public Region getFromOnlineRegions(String encodedRegionName) {
     return this.regions.get(encodedRegionName);
   }
 
   @Override
-  public List<HRegion> getOnlineRegions(TableName tableName) throws IOException {
+  public List<Region> getOnlineRegions(TableName tableName) throws IOException {
     return null;
   }
 
@@ -110,13 +110,12 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public void addToOnlineRegions(HRegion r) {
+  public void addToOnlineRegions(Region r) {
     this.regions.put(r.getRegionInfo().getEncodedName(), r);
   }
 
   @Override
-  public void postOpenDeployTasks(HRegion r)
-      throws KeeperException, IOException {
+  public void postOpenDeployTasks(Region r) throws KeeperException, IOException {
     addToOnlineRegions(r);
   }
 
@@ -258,7 +257,7 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public Map<String, HRegion> getRecoveringRegions() {
+  public Map<String, Region> getRecoveringRegions() {
     // TODO Auto-generated method stub
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
index d8178f0..2d08164 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -90,7 +90,7 @@ public class TestGlobalMemStoreSize {
           ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
         globalMemStoreSize += 
           server.getFromOnlineRegions(regionInfo.getEncodedName()).
-          getMemstoreSize().get();
+          getMemstoreSize();
       }
       assertEquals(server.getRegionServerAccounting().getGlobalMemstoreSize(),
         globalMemStoreSize);
@@ -104,7 +104,7 @@ public class TestGlobalMemStoreSize {
 
       for (HRegionInfo regionInfo :
           ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
-        HRegion r = server.getFromOnlineRegions(regionInfo.getEncodedName());
+        Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
         flush(r, server);
       }
       LOG.info("Post flush on " + server.getServerName());
@@ -120,14 +120,14 @@ public class TestGlobalMemStoreSize {
         // our test was running....
         for (HRegionInfo regionInfo :
             ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
-          HRegion r = server.getFromOnlineRegions(regionInfo.getEncodedName());
-          long l = r.getMemstoreSize().longValue();
+          Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
+          long l = r.getMemstoreSize();
           if (l > 0) {
             // Only meta could have edits at this stage.  Give it another flush
             // clear them.
             assertTrue(regionInfo.isMetaRegion());
             LOG.info(r.toString() + " " + l + ", reflushing");
-            r.flushcache();
+            r.flush(true);
           }
         }
       }
@@ -145,10 +145,10 @@ public class TestGlobalMemStoreSize {
    * @param server
    * @throws IOException
    */
-  private void flush(final HRegion r, final HRegionServer server)
+  private void flush(final Region r, final HRegionServer server)
   throws IOException {
     LOG.info("Flush " + r.toString() + " on " + server.getServerName() +
-      ", " +  r.flushcache() + ", size=" +
+      ", " +  r.flush(true) + ", size=" +
       server.getRegionServerAccounting().getGlobalMemstoreSize());
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index f44eb7b..48a5dbd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -290,13 +291,14 @@ public class TestIOFencing {
       long startWaitTime = System.currentTimeMillis();
       while (compactingRegion.getEarliestFlushTimeForAllStores() <= lastFlushTime ||
           compactingRegion.countStoreFiles() <= 1) {
-        LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString());
+        LOG.info("Waiting for the region to flush " +
+          compactingRegion.getRegionInfo().getRegionNameAsString());
         Thread.sleep(1000);
         assertTrue("Timed out waiting for the region to flush",
           System.currentTimeMillis() - startWaitTime < 30000);
       }
       assertTrue(compactingRegion.countStoreFiles() > 1);
-      final byte REGION_NAME[] = compactingRegion.getRegionName();
+      final byte REGION_NAME[] = compactingRegion.getRegionInfo().getRegionName();
       LOG.info("Asking for compaction");
       ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName());
       LOG.info("Waiting for compaction to be about to start");
@@ -314,7 +316,7 @@ public class TestIOFencing {
       Waiter.waitFor(c, 60000, new Waiter.Predicate<Exception>() {
         @Override
         public boolean evaluate() throws Exception {
-          HRegion newRegion = newServer.getOnlineRegion(REGION_NAME);
+          Region newRegion = newServer.getOnlineRegion(REGION_NAME);
           return newRegion != null && !newRegion.isRecovering();
         }
       });

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index 903ce0e..3fc7594 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -231,7 +231,7 @@ public class TestHFileArchiving {
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
     // make sure we only have 1 region serving this table
     assertEquals(1, servingRegions.size());
-    HRegion region = servingRegions.get(0);
+    Region region = servingRegions.get(0);
 
     // get the parent RS and monitor
     HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
@@ -242,7 +242,7 @@ public class TestHFileArchiving {
     UTIL.loadRegion(region, TEST_FAM);
 
     // get the hfiles in the region
-    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
+    List<Region> regions = hrs.getOnlineRegions(TABLE_NAME);
     assertEquals("More that 1 region for test table.", 1, regions.size());
 
     region = regions.get(0);
@@ -257,7 +257,8 @@ public class TestHFileArchiving {
     clearArchiveDirectory();
 
     // then get the current store files
-    List<String> storeFiles = getRegionStoreFiles(region);
+    byte[][]columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
+    List<String> storeFiles = region.getStoreFileList(columns);
 
     // then delete the table so the hfiles get archived
     UTIL.deleteTable(TABLE_NAME);
@@ -310,7 +311,7 @@ public class TestHFileArchiving {
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
     // make sure we only have 1 region serving this table
     assertEquals(1, servingRegions.size());
-    HRegion region = servingRegions.get(0);
+    Region region = servingRegions.get(0);
 
     // get the parent RS and monitor
     HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
@@ -321,7 +322,7 @@ public class TestHFileArchiving {
     UTIL.loadRegion(region, TEST_FAM);
 
     // get the hfiles in the region
-    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
+    List<Region> regions = hrs.getOnlineRegions(TABLE_NAME);
     assertEquals("More that 1 region for test table.", 1, regions.size());
 
     region = regions.get(0);
@@ -336,7 +337,8 @@ public class TestHFileArchiving {
     clearArchiveDirectory();
 
     // then get the current store files
-    List<String> storeFiles = getRegionStoreFiles(region);
+    byte[][]columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
+    List<String> storeFiles = region.getStoreFileList(columns);
 
     // then delete the table so the hfiles get archived
     UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
@@ -449,19 +451,4 @@ public class TestHFileArchiving {
     }
     return fileNames;
   }
-
-  private List<String> getRegionStoreFiles(final HRegion region) throws IOException {
-    Path regionDir = region.getRegionFileSystem().getRegionDir();
-    FileSystem fs = region.getRegionFileSystem().getFileSystem();
-    List<String> storeFiles = getAllFileNames(fs, regionDir);
-    // remove all the non-storefile named files for the region
-    for (int i = 0; i < storeFiles.size(); i++) {
-      String file = storeFiles.get(i);
-      if (file.contains(HRegionFileSystem.REGION_INFO_FILE) || file.contains("wal")) {
-        storeFiles.remove(i--);
-      }
-    }
-    storeFiles.remove(HRegionFileSystem.REGION_INFO_FILE);
-    return storeFiles;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 772c345..eba3c0b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -170,7 +170,7 @@ public class TestZooKeeperTableArchiveClient {
 
     // create the region
     HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
-    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
+    Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
 
     loadFlushAndCompact(region, TEST_FAM);
 
@@ -220,12 +220,12 @@ public class TestZooKeeperTableArchiveClient {
 
     // create the region
     HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
-    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
+    Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
     loadFlushAndCompact(region, TEST_FAM);
 
     // create the another table that we don't archive
     hcd = new HColumnDescriptor(TEST_FAM);
-    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
+    Region otherRegion = UTIL.createTestRegion(otherTable, hcd);
     loadFlushAndCompact(otherRegion, TEST_FAM);
 
     // get the current hfiles in the archive directory
@@ -379,7 +379,7 @@ public class TestZooKeeperTableArchiveClient {
     return allFiles;
   }
 
-  private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
+  private void loadFlushAndCompact(Region region, byte[] family) throws IOException {
     // create two hfiles in the region
     createHFileInRegion(region, family);
     createHFileInRegion(region, family);
@@ -391,7 +391,7 @@ public class TestZooKeeperTableArchiveClient {
 
     // compact the two files into one file to get files in the archive
     LOG.debug("Compacting stores");
-    region.compactStores(true);
+    region.compact(true);
   }
 
   /**
@@ -400,13 +400,13 @@ public class TestZooKeeperTableArchiveClient {
    * @param columnFamily family for which to add data
    * @throws IOException
    */
-  private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
+  private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
     // put one row in the region
     Put p = new Put(Bytes.toBytes("row"));
     p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
     region.put(p);
     // flush the region to make a store file
-    region.flushcache();
+    region.flush(true);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 460ac19..710cf60 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -53,6 +52,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -545,11 +545,8 @@ public class TestAdmin2 {
         + DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");
 
     // flush all regions
-
-    List<HRegion> regions = new ArrayList<HRegion>(regionServer
-        .getOnlineRegionsLocalContext());
-    for (HRegion r : regions) {
-      r.flushcache();
+    for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+      r.flush(true);
     }
     admin.rollWALWriter(regionServer.getServerName());
     int count = DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
index 82f62e4..953f641 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -91,7 +92,7 @@ public class TestClientPushback {
     HTable table = (HTable) conn.getTable(tablename);
 
     HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
-    HRegion region = rs.getOnlineRegions(tablename).get(0);
+    Region region = rs.getOnlineRegions(tablename).get(0);
 
     LOG.debug("Writing some data to "+tablename);
     // write some data
@@ -101,7 +102,7 @@ public class TestClientPushback {
     table.flushCommits();
 
     // get the current load on RS. Hopefully memstore isn't flushed since we wrote the the data
-    int load = (int)((region.addAndGetGlobalMemstoreSize(0) * 100) / flushSizeBytes);
+    int load = (int)((((HRegion)region).addAndGetGlobalMemstoreSize(0) * 100) / flushSizeBytes);
     LOG.debug("Done writing some data to "+tablename);
 
     // get the stats for the region hosting our table
@@ -114,7 +115,7 @@ public class TestClientPushback {
     assertNotNull( "No stats configured for the client!", stats);
     // get the names so we can query the stats
     ServerName server = rs.getServerName();
-    byte[] regionName = region.getRegionName();
+    byte[] regionName = region.getRegionInfo().getRegionName();
 
     // check to see we found some load on the memstore
     ServerStatistics serverStats = stats.getServerStatsForTesting(server);
@@ -125,8 +126,8 @@ public class TestClientPushback {
     // check that the load reported produces a nonzero delay
     long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats);
     assertNotEquals("Reported load does not produce a backoff", backoffTime, 0);
-    LOG.debug("Backoff calculated for " + region.getRegionNameAsString() + " @ " + server +
-      " is " + backoffTime);
+    LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " +
+      server + " is " + backoffTime);
 
     // Reach into the connection and submit work directly to AsyncProcess so we can
     // monitor how long the submission was delayed via a callback

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index f58bae1..18c74ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateR
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -4180,7 +4181,7 @@ public class TestFromClientSide {
     // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
     // in Store.rowAtOrBeforeFromStoreFile
     String regionName = table.getRegionLocations().firstKey().getEncodedName();
-    HRegion region =
+    Region region =
         TEST_UTIL.getRSForFirstRegionInTable(tableAname).getFromOnlineRegions(regionName);
     Put put1 = new Put(firstRow);
     Put put2 = new Put(secondRow);
@@ -4199,7 +4200,7 @@ public class TestFromClientSide {
     table.put(put2);
     table.put(put3);
     table.put(put4);
-    region.flushcache();
+    region.flush(true);
     Result result = null;
 
     // Test before first that null is returned
@@ -5051,8 +5052,9 @@ public class TestFromClientSide {
     HTable table = TEST_UTIL.createTable(tableName, FAMILY);
     // get the block cache and region
     String regionName = table.getRegionLocations().firstKey().getEncodedName();
-    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
-    Store store = region.getStores().values().iterator().next();
+    Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName)
+      .getFromOnlineRegions(regionName);
+    Store store = region.getStores().iterator().next();
     CacheConfig cacheConf = store.getCacheConfig();
     cacheConf.setCacheDataOnWrite(true);
     cacheConf.setEvictOnClose(true);
@@ -5087,7 +5089,7 @@ public class TestFromClientSide {
     assertEquals(startBlockMiss, cache.getStats().getMissCount());
     // flush the data
     System.out.println("Flushing cache");
-    region.flushcache();
+    region.flush(true);
     // expect one more block in cache, no change in hits/misses
     long expectedBlockCount = startBlockCount + 1;
     long expectedBlockHits = startBlockHits;
@@ -5114,7 +5116,7 @@ public class TestFromClientSide {
     assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
     // flush, one new block
     System.out.println("Flushing cache");
-    region.flushcache();
+    region.flush(true);
     assertEquals(++expectedBlockCount, cache.getBlockCount());
     assertEquals(expectedBlockHits, cache.getStats().getHitCount());
     assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
@@ -5122,7 +5124,7 @@ public class TestFromClientSide {
     System.out.println("Compacting");
     assertEquals(2, store.getStorefilesCount());
     store.triggerMajorCompaction();
-    region.compactStores();
+    region.compact(true);
     waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
     assertEquals(1, store.getStorefilesCount());
     expectedBlockCount -= 2; // evicted two blocks, cached none

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 0b08562..53b36e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -64,8 +64,8 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -989,9 +989,9 @@ public class TestHCM {
        ServerName destServerName = destServer.getServerName();
 
        //find another row in the cur server that is less than ROW_X
-       List<HRegion> regions = curServer.getOnlineRegions(TABLE_NAME3);
+       List<Region> regions = curServer.getOnlineRegions(TABLE_NAME3);
        byte[] otherRow = null;
-       for (HRegion region : regions) {
+       for (Region region : regions) {
          if (!region.getRegionInfo().getEncodedName().equals(toMove.getRegionInfo().getEncodedName())
              && Bytes.BYTES_COMPARATOR.compare(region.getRegionInfo().getStartKey(), ROW_X) < 0) {
            otherRow = region.getRegionInfo().getStartKey();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java
index d85ef5d..c9a628a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationW
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.SumRequest;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.SumResponse;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.RpcCallback;
@@ -87,9 +87,9 @@ implements Coprocessor, CoprocessorService  {
     int sumResult = 0;
     InternalScanner scanner = null;
     try {
-      HRegion region = this.env.getRegion();
+      Region region = this.env.getRegion();
       // for the last region in the table, return null to test null handling
-      if (Bytes.equals(region.getEndKey(), HConstants.EMPTY_END_ROW)) {
+      if (Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) {
         done.run(null);
         return;
       }
@@ -123,6 +123,6 @@ implements Coprocessor, CoprocessorService  {
     }
     done.run(SumResponse.newBuilder().setSum(sumResult).build());
     LOG.info("Returning sum " + sumResult + " for region " +
-        Bytes.toStringBinary(env.getRegion().getRegionName()));
+        Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName()));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java
index 3a38297..0c4d076 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java
@@ -34,9 +34,9 @@ import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationW
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.SumRequest;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.SumResponse;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.RpcCallback;
@@ -88,9 +88,9 @@ implements Coprocessor, CoprocessorService  {
     int sumResult = 0;
     InternalScanner scanner = null;
     try {
-      HRegion region = this.env.getRegion();
+      Region region = this.env.getRegion();
       // throw an exception for requests to the last region in the table, to test error handling
-      if (Bytes.equals(region.getEndKey(), HConstants.EMPTY_END_ROW)) {
+      if (Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) {
         throw new DoNotRetryIOException("An expected exception");
       }
       scanner = region.getScanner(scan);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
index 7100ae7..f4981f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
@@ -54,12 +54,12 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.Leases;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -151,8 +151,8 @@ public class SimpleRegionObserver extends BaseRegionObserver {
     // from external packages
     RegionCoprocessorEnvironment re = (RegionCoprocessorEnvironment)e;
     Leases leases = re.getRegionServerServices().getLeases();
-    leases.createLease(re.getRegion().getRegionNameAsString(), 2000, null);
-    leases.cancelLease(re.getRegion().getRegionNameAsString());
+    leases.createLease(re.getRegion().getRegionInfo().getRegionNameAsString(), 2000, null);
+    leases.cancelLease(re.getRegion().getRegionInfo().getRegionNameAsString());
   }
 
   @Override
@@ -229,7 +229,7 @@ public class SimpleRegionObserver extends BaseRegionObserver {
   }
   
   @Override
-  public void postSplit(ObserverContext<RegionCoprocessorEnvironment> c, HRegion l, HRegion r) {
+  public void postSplit(ObserverContext<RegionCoprocessorEnvironment> c, Region l, Region r) {
     ctPostSplit.incrementAndGet();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
index 140c3b9..6a5080b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
@@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.coprocessor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -163,12 +162,12 @@ public class TestClassLoading {
     // verify that the coprocessors were loaded
     boolean foundTableRegion=false;
     boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true;
-    Map<HRegion, Set<ClassLoader>> regionsActiveClassLoaders =
-        new HashMap<HRegion, Set<ClassLoader>>();
+    Map<Region, Set<ClassLoader>> regionsActiveClassLoaders =
+        new HashMap<Region, Set<ClassLoader>>();
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
-    for (HRegion region:
+    for (Region region:
         hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
+      if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
         foundTableRegion = true;
         CoprocessorEnvironment env;
         env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
@@ -206,7 +205,7 @@ public class TestClassLoading {
     //check if region active classloaders are shared across all RS regions
     Set<ClassLoader> externalClassLoaders = new HashSet<ClassLoader>(
       CoprocessorClassLoader.getAllCached());
-    for (Map.Entry<HRegion, Set<ClassLoader>> regionCP : regionsActiveClassLoaders.entrySet()) {
+    for (Map.Entry<Region, Set<ClassLoader>> regionCP : regionsActiveClassLoaders.entrySet()) {
       assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached."
         + " ClassLoader Cache:" + externalClassLoaders
         + " Region ClassLoaders:" + regionCP.getValue(),
@@ -235,9 +234,8 @@ public class TestClassLoading {
     // verify that the coprocessor was loaded
     boolean found = false;
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
-    for (HRegion region:
-        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(cpName3)) {
+    for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
+      if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) {
         found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
       }
     }
@@ -261,9 +259,8 @@ public class TestClassLoading {
     // verify that the coprocessor was loaded correctly
     boolean found = false;
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
-    for (HRegion region:
-        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(cpName4)) {
+    for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
+      if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) {
         Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4);
         if (cp != null) {
           found = true;
@@ -333,9 +330,8 @@ public class TestClassLoading {
         found6_k4 = false;
 
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
-    for (HRegion region:
-        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
+    for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
+      if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
         found_1 = found_1 ||
             (region.getCoprocessorHost().findCoprocessor(cpName1) != null);
         found_2 = found_2 ||
@@ -422,9 +418,8 @@ public class TestClassLoading {
     boolean found1 = false, found2 = false, found2_k1 = false,
         found2_k2 = false, found2_k3 = false;
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
-    for (HRegion region:
-        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
+    for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
+      if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
         CoprocessorEnvironment env;
         env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
         if (env != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
index 75fe93d..6deade8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
@@ -231,7 +232,7 @@ public class TestCoprocessorInterface {
       preSplitWithSplitRowCalled = true;
     }
     @Override
-    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion l, HRegion r) {
+    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, Region l, Region r) {
       postSplitCalled = true;
     }
 
@@ -297,20 +298,19 @@ public class TestCoprocessorInterface {
     byte [][] families = { fam1, fam2, fam3 };
 
     Configuration hc = initSplit();
-    HRegion region = initHRegion(tableName, name.getMethodName(), hc,
+    Region region = initHRegion(tableName, name.getMethodName(), hc,
       new Class<?>[]{}, families);
 
     for (int i = 0; i < 3; i++) {
       HBaseTestCase.addContent(region, fam3);
-      region.flushcache();
+      region.flush(true);
     }
 
-    region.compactStores();
-
-    byte [] splitRow = region.checkSplit();
+    region.compact(false);
 
+    byte [] splitRow = ((HRegion)region).checkSplit();
     assertNotNull(splitRow);
-    HRegion [] regions = split(region, splitRow);
+    Region [] regions = split(region, splitRow);
     for (int i = 0; i < regions.length; i++) {
       regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
     }
@@ -336,7 +336,7 @@ public class TestCoprocessorInterface {
     // now have all Environments fail
     for (int i = 0; i < regions.length; i++) {
       try {
-        byte [] r = regions[i].getStartKey();
+        byte [] r = regions[i].getRegionInfo().getStartKey();
         if (r == null || r.length <= 0) {
           // Its the start row.  Can't ask for null.  Ask for minimal key instead.
           r = new byte [] {0};
@@ -376,19 +376,19 @@ public class TestCoprocessorInterface {
     byte [][] families = { fam1, fam2, fam3 };
 
     Configuration hc = initSplit();
-    HRegion region = initHRegion(tableName, name.getMethodName(), hc,
+    Region region = initHRegion(tableName, name.getMethodName(), hc,
       new Class<?>[]{CoprocessorImpl.class}, families);
     for (int i = 0; i < 3; i++) {
       HBaseTestCase.addContent(region, fam3);
-      region.flushcache();
+      region.flush(true);
     }
 
-    region.compactStores();
+    region.compact(false);
 
-    byte [] splitRow = region.checkSplit();
+    byte [] splitRow = ((HRegion)region).checkSplit();
 
     assertNotNull(splitRow);
-    HRegion [] regions = split(region, splitRow);
+    Region [] regions = split(region, splitRow);
     for (int i = 0; i < regions.length; i++) {
       regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
     }
@@ -423,10 +423,10 @@ public class TestCoprocessorInterface {
     }
   }
 
-  HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
+  Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
       throws IOException {
     //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
-    HRegion r = HRegion.openHRegion(closedRegion, null);
+    Region r = HRegion.openHRegion(closedRegion, null);
 
     // this following piece is a hack. currently a coprocessorHost
     // is secretly loaded at OpenRegionHandler. we don't really
@@ -434,7 +434,7 @@ public class TestCoprocessorInterface {
     // and set it to region.
     Configuration conf = TEST_UTIL.getConfiguration();
     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
-    r.setCoprocessorHost(host);
+    ((HRegion)r).setCoprocessorHost(host);
 
     for (Class<?> implClass : implClasses) {
       host.load(implClass, Coprocessor.PRIORITY_USER, conf);
@@ -450,7 +450,7 @@ public class TestCoprocessorInterface {
     return r;
   }
 
-  HRegion initHRegion (TableName tableName, String callingMethod,
+  Region initHRegion (TableName tableName, String callingMethod,
       Configuration conf, Class<?> [] implClasses, byte [][] families)
       throws IOException {
     HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -459,11 +459,11 @@ public class TestCoprocessorInterface {
     }
     HRegionInfo info = new HRegionInfo(tableName, null, null, false);
     Path path = new Path(DIR + callingMethod);
-    HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
+    Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
 
     // this following piece is a hack.
     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
-    r.setCoprocessorHost(host);
+    ((HRegion)r).setCoprocessorHost(host);
 
     for (Class<?> implClass : implClasses) {
       host.load(implClass, Coprocessor.PRIORITY_USER, conf);
@@ -497,10 +497,8 @@ public class TestCoprocessorInterface {
     return TEST_UTIL.getConfiguration();
   }
 
-  private HRegion [] split(final HRegion r, final byte [] splitRow)
-      throws IOException {
-
-    HRegion[] regions = new HRegion[2];
+  private Region [] split(final Region r, final byte [] splitRow) throws IOException {
+    Region[] regions = new Region[2];
 
     SplitTransaction st = new SplitTransaction(r, splitRow);
     int i = 0;
@@ -513,18 +511,18 @@ public class TestCoprocessorInterface {
       Server mockServer = Mockito.mock(Server.class);
       when(mockServer.getConfiguration()).thenReturn(
           TEST_UTIL.getConfiguration());
-      PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
-      for (HRegion each_daughter: daughters) {
+      PairOfSameType<Region> daughters = st.execute(mockServer, null);
+      for (Region each_daughter: daughters) {
         regions[i] = each_daughter;
         i++;
       }
     } catch (IOException ioe) {
-      LOG.info("Split transaction of " + r.getRegionNameAsString() +
+      LOG.info("Split transaction of " + r.getRegionInfo().getRegionNameAsString() +
           " failed:" + ioe.getMessage());
       assertTrue(false);
     } catch (RuntimeException e) {
       LOG.info("Failed rollback of failed split of " +
-          r.getRegionNameAsString() + e.getMessage());
+          r.getRegionInfo().getRegionNameAsString() + e.getMessage());
     }
 
     assertTrue(i == 2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 6c7552a..00808bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.ScanType;
@@ -143,7 +144,7 @@ public class TestRegionObserverScannerOpenHook {
     }
   }
 
-  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
+  Region initHRegion(byte[] tableName, String callingMethod, Configuration conf,
       byte[]... families) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
     for (byte[] family : families) {
@@ -170,7 +171,7 @@ public class TestRegionObserverScannerOpenHook {
     byte[][] FAMILIES = new byte[][] { A };
 
     Configuration conf = HBaseConfiguration.create();
-    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
+    Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
     RegionCoprocessorHost h = region.getCoprocessorHost();
     h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
     h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
@@ -195,7 +196,7 @@ public class TestRegionObserverScannerOpenHook {
     byte[][] FAMILIES = new byte[][] { A };
 
     Configuration conf = HBaseConfiguration.create();
-    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
+    Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
     RegionCoprocessorHost h = region.getCoprocessorHost();
     h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
     h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
@@ -204,7 +205,7 @@ public class TestRegionObserverScannerOpenHook {
     Put put = new Put(ROW);
     put.add(A, A, A);
     region.put(put);
-    region.flushcache();
+    region.flush(true);
     Get get = new Get(ROW);
     Result r = region.get(get);
     assertNull(
@@ -272,10 +273,10 @@ public class TestRegionObserverScannerOpenHook {
     table.put(put);
 
     HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
-    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
+    List<Region> regions = rs.getOnlineRegions(desc.getTableName());
     assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
-    HRegion region = regions.get(0);
-    admin.flushRegion(region.getRegionName());
+    Region region = regions.get(0);
+    admin.flushRegion(region.getRegionInfo().getRegionName());
     CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
         .getCompactionStateChangeLatch();
 
@@ -283,7 +284,7 @@ public class TestRegionObserverScannerOpenHook {
     put = new Put(Bytes.toBytes("anotherrow"));
     put.add(A, A, A);
     table.put(put);
-    admin.flushRegion(region.getRegionName());
+    admin.flushRegion(region.getRegionInfo().getRegionName());
 
     // run a compaction, which normally would should get rid of the data
     // wait for the compaction checker to complete

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
index 2e6eabc..de43feb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
 import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
@@ -90,7 +90,7 @@ public class TestRegionServerObserver {
       desc.addFamily(new HColumnDescriptor(FAM));
       admin.createTable(desc, new byte[][] { Bytes.toBytes("row") });
       assertFalse(regionServerObserver.wasRegionMergeCalled());
-      List<HRegion> regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME));
+      List<Region> regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME));
       admin.mergeRegions(regions.get(0).getRegionInfo().getEncodedNameAsBytes(), regions.get(1)
           .getRegionInfo().getEncodedNameAsBytes(), true);
       int regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size();
@@ -130,18 +130,18 @@ public class TestRegionServerObserver {
     }
 
     @Override
-    public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, HRegion regionA,
-        HRegion regionB) throws IOException {
+    public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
+        Region regionB) throws IOException {
       preMergeCalled = true;
     }
 
     @Override
     public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException {
+        Region regionA, Region regionB, List<Mutation> metaEntries) throws IOException {
       preMergeBeforePONRCalled = true;
       RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
       HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
-      List<HRegion> onlineRegions =
+      List<Region> onlineRegions =
           rs.getOnlineRegions(TableName.valueOf("testRegionServerObserver_2"));
       rmt = new RegionMergeTransaction(onlineRegions.get(0), onlineRegions.get(1), true);
       if (!rmt.prepare(rs)) {
@@ -159,7 +159,7 @@ public class TestRegionServerObserver {
 
     @Override
     public void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        HRegion regionA, HRegion regionB, HRegion mr) throws IOException {
+        Region regionA, Region regionB, Region mr) throws IOException {
       preMergeAfterPONRCalled = true;
       RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
       HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
@@ -168,19 +168,19 @@ public class TestRegionServerObserver {
 
     @Override
     public void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        HRegion regionA, HRegion regionB) throws IOException {
+        Region regionA, Region regionB) throws IOException {
       preRollBackMergeCalled = true;
     }
 
     @Override
     public void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        HRegion regionA, HRegion regionB) throws IOException {
+        Region regionA, Region regionB) throws IOException {
       postRollBackMergeCalled = true;
     }
 
     @Override
-    public void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, HRegion regionA,
-        HRegion regionB, HRegion mergedRegion) throws IOException {
+    public void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
+        Region regionB, Region mergedRegion) throws IOException {
       postMergeCalled = true;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
index 9d448b4..82ea5d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.filter.FilterList.Operator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -67,7 +68,7 @@ import com.google.common.base.Throwables;
 @Category({FilterTests.class, SmallTests.class})
 public class TestFilter {
   private final static Log LOG = LogFactory.getLog(TestFilter.class);
-  private HRegion region;
+  private Region region;
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   //
@@ -165,7 +166,7 @@ public class TestFilter {
     }
 
     // Flush
-    this.region.flushcache();
+    this.region.flush(true);
 
     // Insert second half (reverse families)
     for(byte [] ROW : ROWS_ONE) {
@@ -242,7 +243,7 @@ public class TestFilter {
       this.region.put(p);
     }
     // Flush
-    this.region.flushcache();
+    this.region.flush(true);
 
     // Insert second half (reverse families)
     for (byte[] ROW : ROWS_THREE) {
@@ -1451,7 +1452,7 @@ public class TestFilter {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestFilter"));
     htd.addFamily(new HColumnDescriptor(family));
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
+    Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
         TEST_UTIL.getConfiguration(), htd);
 
     for(int i=0; i<5; i++) {
@@ -1460,7 +1461,7 @@ public class TestFilter {
       p.add(family, qualifier, Bytes.toBytes(String.valueOf(111+i)));
       testRegion.put(p);
     }
-    testRegion.flushcache();
+    testRegion.flush(true);
 
     // rows starting with "b"
     PrefixFilter pf = new PrefixFilter(new byte[] {'b'}) ;
@@ -1486,8 +1487,8 @@ public class TestFilter {
     assertEquals(2, resultCount);
     scanner.close();
 
-    WAL wal = testRegion.getWAL();
-    testRegion.close();
+    WAL wal = ((HRegion)testRegion).getWAL();
+    ((HRegion)testRegion).close();
     wal.close();
   }
 
@@ -1820,7 +1821,7 @@ public class TestFilter {
       p.setDurability(Durability.SKIP_WAL);
       p.add(FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]);
       this.region.put(p);
-      this.region.flushcache();
+      this.region.flush(true);
 
       // Set of KVs (page: 1; pageSize: 1) - the first set of 1 column per row
       KeyValue [] expectedKVs = {
@@ -2011,7 +2012,7 @@ public class TestFilter {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testNestedFilterListWithSCVF"));
     htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
+    Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
         TEST_UTIL.getConfiguration(), htd);
     for(int i=0; i<10; i++) {
       Put p = new Put(Bytes.toBytes("row" + i));
@@ -2019,7 +2020,7 @@ public class TestFilter {
       p.add(FAMILIES[0], columnStatus, Bytes.toBytes(i%2));
       testRegion.put(p);
     }
-    testRegion.flushcache();
+    testRegion.flush(true);
     // 1. got rows > "row4"
     Filter rowFilter = new RowFilter(CompareOp.GREATER,new BinaryComparator(Bytes.toBytes("row4")));
     Scan s1 = new Scan();
@@ -2095,8 +2096,8 @@ public class TestFilter {
       results.clear();
     }
     assertFalse(NextState.hasMoreValues(scanner.next(results)));
-    WAL wal = testRegion.getWAL();
-    testRegion.close();
+    WAL wal = ((HRegion)testRegion).getWAL();
+    ((HRegion)testRegion).close();
     wal.close();
   }      
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
index c94054b..b88bbbf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
@@ -35,10 +35,11 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -61,7 +62,7 @@ public class TestInvocationRecordFilter {
   private static final String VALUE_PREFIX = "value";
 
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private HRegion region;
+  private Region region;
 
   @Before
   public void setUp() throws Exception {
@@ -79,7 +80,7 @@ public class TestInvocationRecordFilter {
           Bytes.toBytes(VALUE_PREFIX + i));
     }
     this.region.put(put);
-    this.region.flushcache();
+    this.region.flush(true);
   }
 
   @Test
@@ -151,8 +152,8 @@ public class TestInvocationRecordFilter {
 
   @After
   public void tearDown() throws Exception {
-    WAL wal = region.getWAL();
-    region.close();
+    WAL wal = ((HRegion)region).getWAL();
+    ((HRegion)region).close();
     wal.close();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
index 613d1ea..504350c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -285,7 +286,7 @@ public class TestBlockReorder {
 
     int nbTest = 0;
     while (nbTest < 10) {
-      final List<HRegion> regions = targetRs.getOnlineRegions(h.getName());
+      final List<Region> regions = targetRs.getOnlineRegions(h.getName());
       final CountDownLatch latch = new CountDownLatch(regions.size());
       // listen for successful log rolls
       final WALActionsListener listener = new WALActionsListener.Base() {
@@ -294,8 +295,8 @@ public class TestBlockReorder {
               latch.countDown();
             }
           };
-      for (HRegion region : regions) {
-        region.getWAL().registerWALActionsListener(listener);
+      for (Region region : regions) {
+        ((HRegion)region).getWAL().registerWALActionsListener(listener);
       }
 
       htu.getHBaseAdmin().rollWALWriter(targetRs.getServerName());
@@ -308,8 +309,8 @@ public class TestBlockReorder {
             "tests fail, it's probably because we should still be waiting.");
         Thread.currentThread().interrupt();
       }
-      for (HRegion region : regions) {
-        region.getWAL().unregisterWALActionsListener(listener);
+      for (Region region : regions) {
+        ((HRegion)region).getWAL().unregisterWALActionsListener(listener);
       }
 
       // We need a sleep as the namenode is informed asynchronously

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
index e087457..5ccb206 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -113,7 +113,7 @@ public class TestEncodedSeekers {
         setBlocksize(BLOCK_SIZE).
         setBloomFilterType(BloomType.NONE).
         setCompressTags(compressTags);
-    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
+    Region region = testUtil.createTestRegion(TABLE_NAME, hcd);
 
     //write the data, but leave some in the memstore
     doPuts(region);
@@ -122,10 +122,9 @@ public class TestEncodedSeekers {
     doGets(region);
 
     //verify correctness again after compacting
-    region.compactStores();
+    region.compact(false);
     doGets(region);
 
-
     Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
 
     // Ensure that compactions don't pollute the cache with unencoded blocks
@@ -138,7 +137,7 @@ public class TestEncodedSeekers {
   }
 
 
-  private void doPuts(HRegion region) throws IOException{
+  private void doPuts(Region region) throws IOException{
     LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
      for (int i = 0; i < NUM_ROWS; ++i) {
       byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
@@ -162,13 +161,13 @@ public class TestEncodedSeekers {
         region.put(put);
       }
       if (i % NUM_ROWS_PER_FLUSH == 0) {
-        region.flushcache();
+        region.flush(true);
       }
     }
   }
 
 
-  private void doGets(HRegion region) throws IOException{
+  private void doGets(Region region) throws IOException{
     for (int i = 0; i < NUM_ROWS; ++i) {
       final byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
       for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
index 6a64119..1eda567 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -66,7 +66,7 @@ public class TestPrefixTree {
 
   private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
-  private HRegion region;
+  private Region region;
 
   @Before
   public void setUp() throws Exception {
@@ -101,7 +101,7 @@ public class TestPrefixTree {
     put = new Put(row4_bytes);
     put.addColumn(fam, qual2, Bytes.toBytes("c2-value-3"));
     region.put(put);
-    region.flushcache();
+    region.flush(true);
     String[] rows = new String[3];
     rows[0] = row1;
     rows[1] = row2;
@@ -182,7 +182,7 @@ public class TestPrefixTree {
     region.put(new Put(Bytes.toBytes("obj29")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
     region.put(new Put(Bytes.toBytes("obj2")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
     region.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
-    region.flushcache();
+    region.flush(true);
     Scan scan = new Scan(Bytes.toBytes("obj29995"));
     RegionScanner scanner = region.getScanner(scan);
     List<Cell> cells = new ArrayList<Cell>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 7ec7e08..00639cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -433,7 +434,7 @@ public class TestCacheOnWrite {
     final String cf = "myCF";
     final byte[] cfBytes = Bytes.toBytes(cf);
     final int maxVersions = 3;
-    HRegion region = TEST_UTIL.createTestRegion(table, 
+    Region region = TEST_UTIL.createTestRegion(table, 
         new HColumnDescriptor(cf)
             .setCompressionType(compress)
             .setBloomFilterType(BLOOM_TYPE)
@@ -467,18 +468,18 @@ public class TestCacheOnWrite {
         p.setDurability(Durability.ASYNC_WAL);
         region.put(p);
       }
-      region.flushcache();
+      region.flush(true);
     }
     clearBlockCache(blockCache);
     assertEquals(0, blockCache.getBlockCount());
-    region.compactStores();
+    region.compact(false);
     LOG.debug("compactStores() returned");
 
     for (CachedBlock block: blockCache) {
       assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
       assertNotEquals(BlockType.DATA, block.getBlockType());
     }
-    region.close();
+    ((HRegion)region).close();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index 2af3a6e..7625842 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Before;
 import org.junit.Test;
@@ -111,7 +111,7 @@ public class TestForceCacheImportantBlocks {
       setBloomFilterType(BLOOM_TYPE);
     hcd.setBlocksize(BLOCK_SIZE);
     hcd.setBlockCacheEnabled(cfCacheEnabled);
-    HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
+    Region region = TEST_UTIL.createTestRegion(TABLE, hcd);
     BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache();
     CacheStats stats = cache.getStats();
     writeTestData(region);
@@ -128,7 +128,7 @@ public class TestForceCacheImportantBlocks {
     else assertTrue(stats.getMissCount() > missCount);
   }
 
-  private void writeTestData(HRegion region) throws IOException {
+  private void writeTestData(Region region) throws IOException {
     for (int i = 0; i < NUM_ROWS; ++i) {
       Put put = new Put(Bytes.toBytes("row" + i));
       for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
@@ -139,7 +139,7 @@ public class TestForceCacheImportantBlocks {
       }
       region.put(put);
       if ((i + 1) % ROWS_PER_HFILE == 0) {
-        region.flushcache();
+        region.flush(true);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f4b661/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
index 4f29af3..6baadbb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
@@ -36,11 +36,11 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Test;
@@ -100,7 +100,7 @@ public class TestScannerSelectionUsingKeyRange {
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(TABLE);
-    HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf,
+    Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf,
         htd);
 
     for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
@@ -112,7 +112,7 @@ public class TestScannerSelectionUsingKeyRange {
         }
         region.put(put);
       }
-      region.flushcache();
+      region.flush(true);
     }
 
     Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));


Mime
View raw message