hbase-commits mailing list archives

From apurt...@apache.org
Subject [13/16] hbase git commit: HBASE-12972 Region, a supportable public/evolving subset of HRegion
Date Tue, 31 Mar 2015 01:40:32 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
new file mode 100644
index 0000000..441a93b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -0,0 +1,680 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.DroppedSnapshotException;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.IsolationLevel;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
+
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+/**
+ * Regions store data for a certain region of a table.  A Region stores all
+ * columns for each row. A given table consists of one or more Regions.
+ *
+ * <p>A Region is defined by its table and its key extent.
+ *
+ * <p>Locking at the Region level serves only one purpose: preventing the
+ * region from being closed (and consequently split) while other operations
+ * are ongoing. Each row level operation obtains both a row lock and a region
+ * read lock for the duration of the operation. While a scanner is being
+ * constructed, getScanner holds a read lock. If the scanner is successfully
+ * constructed, it holds a read lock until it is closed. A close takes out a
+ * write lock and consequently will block for ongoing operations and will block
+ * new operations from starting while the close is in progress.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
+public interface Region extends ConfigurationObserver {
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Region state
+
+  /** @return region information for this region */
+  HRegionInfo getRegionInfo();
+
+  /** @return table descriptor for this region */
+  HTableDescriptor getTableDesc();
+
+  /** @return true if region is available (not closed and not closing) */
+  boolean isAvailable();
+
+  /** @return true if region is closed */
+  boolean isClosed();
+
+  /** @return True if closing process has started */
+  boolean isClosing();
+
+  /** @return True if region is in recovering state */
+  boolean isRecovering();
+
+  /** @return True if region is read only */
+  boolean isReadOnly();
+
+  /**
+   * Return the list of Stores managed by this region
+   * <p>Use with caution.  Exposed for use of fixup utilities.
+   * @return a list of the Stores managed by this region
+   */
+  List<Store> getStores();
+
+  /**
+   * Return the Store for the given family
+   * <p>Use with caution.  Exposed for use of fixup utilities.
+   * @return the Store for the given family
+   */
+  Store getStore(byte[] family);
+
+  /** @return list of store file names for the given families */
+  List<String> getStoreFileList(byte [][] columns);
+
+  /**
+   * Check the region's underlying store files, open the files that have not
+   * been opened yet, and remove the store file readers for store files no
+   * longer available.
+   * @throws IOException
+   */
+  boolean refreshStoreFiles() throws IOException;
+
+  /** @return the latest sequence number that was read from storage when this region was opened */
+  long getOpenSeqNum();
+
+  /** @return the max sequence id of flushed data on this region */
+  long getMaxFlushedSeqId();
+
+  /** @return the oldest sequence id found in the store for the given family */
+  long getOldestSeqIdOfStore(byte[] familyName);
+
+  /**
+   * This can be used to determine the last time all files of this region were major compacted.
+   * @param majorCompactionOnly Only consider HFiles that are the result of major compaction
+   * @return the timestamp of the oldest HFile for all stores of this region
+   */
+  long getOldestHfileTs(boolean majorCompactionOnly) throws IOException;
+
+  /** 
+   * @return map of column family names to max sequence id that was read from storage when this
+   * region was opened
+   */
+  Map<byte[], Long> getMaxStoreSeqId();
+
+  /** @return true if loading column families on demand by default */
+  boolean isLoadingCfsOnDemandDefault();
+
+  /** @return readpoint considering given IsolationLevel */
+  long getReadpoint(IsolationLevel isolationLevel);
+
+  /**
+   * @return The earliest time a store in the region was flushed. All
+   *         other stores in the region would have been flushed either at, or
+   *         after this time.
+   */
+  long getEarliestFlushTimeForAllStores();
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Metrics
+  
+  /** @return read requests count for this region */
+  long getReadRequestsCount();
+
+  /**
+   * Update the read request count for this region
+   * @param i increment
+   */
+  void updateReadRequestsCount(long i);
+
+  /** @return write request count for this region */
+  long getWriteRequestsCount();
+
+  /**
+   * Update the write request count for this region
+   * @param i increment
+   */
+  void updateWriteRequestsCount(long i);
+
+  /** @return memstore size for this region, in bytes */
+  long getMemstoreSize();
+
+  /** @return the number of mutations processed bypassing the WAL */
+  long getNumMutationsWithoutWAL();
+  
+  /** @return the size of data processed bypassing the WAL, in bytes */
+  long getDataInMemoryWithoutWAL();
+
+  /** @return the number of blocked requests */
+  long getBlockedRequestsCount();
+
+  /** @return the number of checkAndMutate guards that passed */
+  long getCheckAndMutateChecksPassed();
+
+  /** @return the number of failed checkAndMutate guards */
+  long getCheckAndMutateChecksFailed();
+
+  /** @return the MetricsRegion for this region */
+  MetricsRegion getMetrics();
+
+  /** @return the block distribution for all Stores managed by this region */
+  HDFSBlocksDistribution getHDFSBlocksDistribution();
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Locking
+
+  // Region read locks
+
+  /**
+   * Operation enum is used in {@link Region#startRegionOperation} to provide context for
+   * various checks before any region operation begins.
+   */
+  enum Operation {
+    ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE,
+    REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT
+  }
+
+  /**
+   * This method needs to be called before any public call that reads or
+   * modifies data. 
+   * Acquires a read lock and checks if the region is closing or closed.
+   * <p>{@link #closeRegionOperation} MUST then always be called after
+   * the operation has completed, whether it succeeded or failed.
+   * @throws IOException
+   */
+  void startRegionOperation() throws IOException;
+
+  /**
+   * This method needs to be called before any public call that reads or
+   * modifies data. 
+   * Acquires a read lock and checks if the region is closing or closed.
+   * <p>{@link #closeRegionOperation} MUST then always be called after
+   * the operation has completed, whether it succeeded or failed.
+   * @param op The operation that is about to be taken on the region
+   * @throws IOException
+   */
+  void startRegionOperation(Operation op) throws IOException;
+
+  /**
+   * Closes the region operation lock.
+   * @throws IOException
+   */
+  void closeRegionOperation() throws IOException;
+
+  // Row write locks
+
+  /**
+   * Row lock held by a given thread.
+   * One thread may acquire multiple locks on the same row simultaneously.
+   * The locks must be released by calling release() from the same thread.
+   */
+  interface RowLock {
+    /**
+     * Release this lock.  If there are no remaining locks held by the current thread
+     * then unlock the row and allow other threads to acquire the lock.
+     * @throws IllegalArgumentException if called by a different thread than the lock owning
+     *     thread
+     */
+    void release();
+  }
+
+  /**
+   * Tries to acquire a lock on the given row.
+   * @param row the row to lock
+   * @param waitForLock if true, will block until the lock is available.
+   *        Otherwise, just tries to obtain the lock and returns
+   *        null if unavailable.
+   * @return the row lock if acquired,
+   *   null if waitForLock was false and the lock was not acquired
+   * @throws IOException if waitForLock was true and the lock could not be acquired after waiting
+   */
+  RowLock getRowLock(byte[] row, boolean waitForLock) throws IOException;
+
+  /**
+   * If the given list of row locks is not null, releases all locks.
+   */
+  void releaseRowLocks(List<RowLock> rowLocks);
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Region operations
+
+  /**
+   * Perform one or more append operations on a row.
+   * @param append
+   * @param nonceGroup
+   * @param nonce
+   * @return result of the operation
+   * @throws IOException
+   */
+  Result append(Append append, long nonceGroup, long nonce) throws IOException;
+
+  /**
+   * Perform a batch of mutations.
+   * <p>
+   * Note this supports only Put and Delete mutations and will ignore other types passed.
+   * @param mutations the list of mutations
+   * @param nonceGroup
+   * @param nonce
+   * @return an array of OperationStatus which internally contains the
+   *         OperationStatusCode and the exceptionMessage if any.
+   * @throws IOException
+   */
+  OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce)
+      throws IOException;
+
+  /**
+   * Replay a batch of mutations.
+   * @param mutations mutations to replay.
+   * @param replaySeqId
+   * @return an array of OperationStatus which internally contains the
+   *         OperationStatusCode and the exceptionMessage if any.
+   * @throws IOException
+   */
+   OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException;
+
+  /**
+   * Atomically checks if a row/family/qualifier value matches the expected value.
+   * If it does, it performs the mutation.  If the passed value is null, the
+   * check is for the lack of column (ie: non-existence)
+   * @param row to check
+   * @param family column family to check
+   * @param qualifier column qualifier to check
+   * @param compareOp the comparison operator
+   * @param comparator the expected value
+   * @param mutation the mutation to apply if the check succeeds
+   * @param writeToWAL whether the mutation should be written to the WAL
+   * @return true if mutation was applied, false otherwise
+   * @throws IOException
+   */
+  boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp,
+      ByteArrayComparable comparator, Mutation mutation, boolean writeToWAL) throws IOException;
+
+  /**
+   * Atomically checks if a row/family/qualifier value matches the expected value.
+   * If it does, it performs the row mutations.  If the passed value is null, the
+   * check is for the lack of column (ie: non-existence)
+   * @param row to check
+   * @param family column family to check
+   * @param qualifier column qualifier to check
+   * @param compareOp the comparison operator
+   * @param comparator the expected value
+   * @param mutations the row mutations to apply if the check succeeds
+   * @param writeToWAL whether the mutations should be written to the WAL
+   * @return true if mutations were applied, false otherwise
+   * @throws IOException
+   */
+  boolean checkAndRowMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp,
+      ByteArrayComparable comparator, RowMutations mutations, boolean writeToWAL)
+      throws IOException;
+
+  /**
+   * Deletes the specified cells/row.
+   * @param delete
+   * @throws IOException
+   */
+  void delete(Delete delete) throws IOException;
+
+  /**
+   * Do a get based on the get parameter.
+   * @param get query parameters
+   * @return result of the operation
+   */
+  Result get(Get get) throws IOException;
+
+  /**
+   * Do a get based on the get parameter.
+   * @param get query parameters
+   * @param withCoprocessor invoke coprocessor or not. We don't want to
+   * always invoke cp.
+   * @return list of cells resulting from the operation
+   */
+  List<Cell> get(Get get, boolean withCoprocessor) throws IOException;
+
+  /**
+   * Return all the data for the row that matches <i>row</i> exactly,
+   * or the one that immediately precedes it, at or immediately before
+   * <i>ts</i>.
+   * @param row
+   * @param family
+   * @return result of the operation
+   * @throws IOException
+   */
+  Result getClosestRowBefore(byte[] row, byte[] family) throws IOException;
+
+  /**
+   * Return an iterator that scans over the Region, returning the indicated
+   * columns and rows specified by the {@link Scan}.
+   * <p>
+   * This scanner must be closed by the caller.
+   *
+   * @param scan configured {@link Scan}
+   * @return RegionScanner
+   * @throws IOException read exceptions
+   */
+  RegionScanner getScanner(Scan scan) throws IOException;
+
+  /**
+   * Perform one or more increment operations on a row.
+   * @param increment
+   * @param nonceGroup
+   * @param nonce
+   * @return result of the operation
+   * @throws IOException
+   */
+  Result increment(Increment increment, long nonceGroup, long nonce) throws IOException;
+
+  /**
+   * Performs multiple mutations atomically on a single row. Currently
+   * {@link Put} and {@link Delete} are supported.
+   *
+   * @param mutations object that specifies the set of mutations to perform atomically
+   * @throws IOException
+   */
+  void mutateRow(RowMutations mutations) throws IOException;
+
+  /**
+   * Perform atomic mutations within the region.
+   *
+   * @param mutations The list of mutations to perform.
+   * <code>mutations</code> can contain operations for multiple rows.
+   * Caller has to ensure that all rows are contained in this region.
+   * @param rowsToLock Rows to lock. If multiple rows are locked, care should
+   * be taken that <code>rowsToLock</code> is sorted in order to avoid deadlocks.
+   * @param nonceGroup Optional nonce group of the operation (client Id)
+   * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
+   * @throws IOException
+   */
+  void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock,
+      long nonceGroup, long nonce) throws IOException;
+
+  /**
+   * Performs atomic multiple reads and writes on a given row.
+   *
+   * @param processor The object that defines the reads and writes to a row.
+   */
+  void processRowsWithLocks(RowProcessor<?,?> processor) throws IOException;
+
+  /**
+   * Performs atomic multiple reads and writes on a given row.
+   *
+   * @param processor The object that defines the reads and writes to a row.
+   * @param nonceGroup Optional nonce group of the operation (client Id)
+   * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
+   */
+  void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce)
+      throws IOException;
+
+  /**
+   * Performs atomic multiple reads and writes on a given row.
+   *
+   * @param processor The object that defines the reads and writes to a row.
+   * @param timeout The timeout of the processor.process() execution.
+   *                Use a negative number to switch off the time bound.
+   * @param nonceGroup Optional nonce group of the operation (client Id)
+   * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
+   */
+  void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce)
+      throws IOException;
+
+  /**
+   * Puts some data in the table.
+   * @param put
+   * @throws IOException
+   */
+  void put(Put put) throws IOException;
+
+  /**
+   * Listener class to enable callers of
+   * bulkLoadHFile() to perform any necessary
+   * pre/post processing of a given bulkload call
+   */
+  interface BulkLoadListener {
+
+    /**
+     * Called before an HFile is actually loaded
+     * @param family family being loaded to
+     * @param srcPath path of HFile
+     * @return final path to be used for actual loading
+     * @throws IOException
+     */
+    String prepareBulkLoad(byte[] family, String srcPath) throws IOException;
+
+    /**
+     * Called after a successful HFile load
+     * @param family family being loaded to
+     * @param srcPath path of HFile
+     * @throws IOException
+     */
+    void doneBulkLoad(byte[] family, String srcPath) throws IOException;
+
+    /**
+     * Called after a failed HFile load
+     * @param family family being loaded to
+     * @param srcPath path of HFile
+     * @throws IOException
+     */
+    void failedBulkLoad(byte[] family, String srcPath) throws IOException;
+  }
+
+  /**
+   * Attempts to atomically load a group of hfiles.  This is critical for loading
+   * rows with multiple column families atomically.
+   *
+   * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+   * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
+   * file about to be bulk loaded
+   * @param assignSeqId
+   * @return true if successful, false if failed recoverably
+   * @throws IOException if failed unrecoverably.
+   */
+  boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId,
+      BulkLoadListener bulkLoadListener) throws IOException;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Coprocessors
+
+  /** @return the coprocessor host */
+  RegionCoprocessorHost getCoprocessorHost();
+
+  /**
+   * Executes a single protocol buffer coprocessor endpoint {@link Service} method using
+   * the registered protocol handlers.  {@link Service} implementations must be registered via the
+   * {@link Region#registerService(com.google.protobuf.Service)}
+   * method before they are available.
+   *
+   * @param controller an {@code RpcController} implementation to pass to the invoked service
+   * @param call a {@code CoprocessorServiceCall} instance identifying the service, method,
+   *     and parameters for the method invocation
+   * @return a protocol buffer {@code Message} instance containing the method's result
+   * @throws IOException if no registered service handler is found or an error
+   *     occurs during the invocation
+   * @see org.apache.hadoop.hbase.regionserver.Region#registerService(com.google.protobuf.Service)
+   */
+  Message execService(RpcController controller, CoprocessorServiceCall call) throws IOException;
+
+  /**
+   * Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to
+   * be available for handling
+   * {@link Region#execService(com.google.protobuf.RpcController,
+   *    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall)} calls.
+   *
+   * <p>
+   * Only a single instance may be registered per region for a given {@link Service} subclass (the
+   * instances are keyed on {@link com.google.protobuf.Descriptors.ServiceDescriptor#getFullName()}).
+   * After the first registration, subsequent calls with the same service name will fail with
+   * a return value of {@code false}.
+   * </p>
+   * @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint
+   * @return {@code true} if the registration was successful, {@code false}
+   * otherwise
+   */
+  boolean registerService(Service instance);
+
+  ///////////////////////////////////////////////////////////////////////////
+  // RowMutation processor support
+
+  /**
+   * Check the collection of families for validity.
+   * @param families
+   * @throws NoSuchColumnFamilyException
+   */
+  void checkFamilies(Collection<byte[]> families) throws NoSuchColumnFamilyException;
+
+  /**
+   * Check the collection of families for valid timestamps
+   * @param familyMap
+   * @param now current timestamp
+   * @throws FailedSanityCheckException
+   */
+  void checkTimestamps(Map<byte[], List<Cell>> familyMap, long now)
+      throws FailedSanityCheckException;
+
+  /**
+   * Prepare a delete for a row mutation processor
+   * @param delete The delete to prepare.  WARNING: the passed Delete is modified by this method.
+   * @throws IOException
+   */
+  void prepareDelete(Delete delete) throws IOException;
+
+  /**
+   * Set up correct timestamps in the KVs in the Delete object.
+   * <p>Caller should have the row and region locks.
+   * @param mutation
+   * @param familyCellMap
+   * @param now
+   * @throws IOException
+   */
+  void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyCellMap,
+      byte[] now) throws IOException;
+
+  /**
+   * Replace any cell timestamps set to {@link HConstants#LATEST_TIMESTAMP} with the
+   * provided current timestamp.
+   * @param values
+   * @param now
+   */
+  void updateCellTimestamps(final Iterable<List<Cell>> values, final byte[] now)
+      throws IOException;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Flushes, compactions, splits, etc.
+  // Wizards only, please
+
+  interface FlushResult {
+    enum Result {
+      FLUSHED_NO_COMPACTION_NEEDED,
+      FLUSHED_COMPACTION_NEEDED,
+      // Special case where a flush didn't run because there's nothing in the memstores. Used when
+      // bulk loading to know when we can still load even if a flush didn't happen.
+      CANNOT_FLUSH_MEMSTORE_EMPTY,
+      CANNOT_FLUSH
+    }
+    
+    /** @return the detailed result code */
+    Result getResult();
+
+    /** @return true if the memstores were flushed, else false */
+    boolean isFlushSucceeded();
+    
+    /** @return True if the flush requested a compaction, else false */
+    boolean isCompactionNeeded();
+  }
+
+  /**
+   * Flush the cache.
+   *
+   * <p>When this method is called the cache will be flushed unless:
+   * <ol>
+   *   <li>the cache is empty</li>
+   *   <li>the region is closed.</li>
+   *   <li>a flush is already in progress</li>
+   *   <li>writes are disabled</li>
+   * </ol>
+   *
+   * <p>This method may block for some time, so it should not be called from a
+   * time-sensitive thread.
+   * @param force whether we want to force a flush of all stores
+   * @return FlushResult indicating whether the flush was successful or not and if
+   * the region needs compacting
+   *
+   * @throws IOException general io exceptions
+   * @throws DroppedSnapshotException Thrown when abort is required
+   * because a snapshot was not properly persisted.
+   */
+  FlushResult flush(boolean force) throws IOException;
+
+  /**
+   * Synchronously compact all stores in the region.
+   * <p>This operation could block for a long time, so don't call it from a
+   * time-sensitive thread.
+   * <p>Note that no locks are taken to prevent possible conflicts between
+   * compaction and splitting activities. The regionserver does not normally compact
+   * and split in parallel. However, by calling this method you may introduce
+   * unexpected and unhandled concurrency. Don't do this unless you know what
+   * you are doing.
+   *
+   * @param majorCompaction True to force a major compaction regardless of thresholds
+   * @throws IOException
+   */
+  void compact(final boolean majorCompaction) throws IOException;
+
+  /**
+   * Trigger major compaction on all stores in the region.
+   * <p>
+   * Compaction will be performed asynchronously to this call by the RegionServer's
+   * CompactSplitThread. See also {@link Store#triggerMajorCompaction()}
+   * @throws IOException
+   */
+  void triggerMajorCompaction() throws IOException;
+
+  /**
+   * @return the current compaction state of this region
+   */
+  CompactionState getCompactionState();
+
+  /** Wait for all current flushes and compactions of the region to complete */
+  void waitForFlushesAndCompactions();
+
+}
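
The locking contract in the class comment above reduces to a bracket pattern
for callers. Below is a minimal sketch, assuming a caller (for example a
coprocessor) already holds a Region reference; the helper class and method
names are illustrative, not part of this patch:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.regionserver.Region;

    public class RegionOperationSketch {
      // startRegionOperation() takes the region read lock and fails if the
      // region is closing or closed; closeRegionOperation() MUST follow in a
      // finally block, whether the operation succeeded or not.
      static Result readUnderRegionLock(Region region, byte[] row) throws IOException {
        region.startRegionOperation(Region.Operation.GET);
        try {
          return region.get(new Get(row));
        } finally {
          region.closeRegionOperation();
        }
      }

      // Row locks follow the same acquire/release discipline: getRowLock(row, true)
      // blocks until the lock is available, and release() must be called by the
      // same thread that acquired it.
      static void updateUnderRowLock(Region region, byte[] row) throws IOException {
        Region.RowLock lock = region.getRowLock(row, true);
        try {
          // a read-modify-write of the locked row would go here
        } finally {
          lock.release();
        }
      }
    }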

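As a second sketch, checkAndMutate() composes a column comparison with an
atomic write. The comparator comes from the filter package Region.java already
imports; the scenario, family, and qualifier below are assumptions for
illustration only:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateSketch {
      // Atomically set cf:state to "done" only if it currently reads "pending".
      static boolean markDone(Region region, byte[] row) throws IOException {
        byte[] cf = Bytes.toBytes("cf");
        byte[] q = Bytes.toBytes("state");
        Put put = new Put(row);
        put.add(cf, q, Bytes.toBytes("done"));
        return region.checkAndMutate(row, cf, q, CompareOp.EQUAL,
            new BinaryComparator(Bytes.toBytes("pending")), put, true /* writeToWAL */);
      }
    }
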
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 83aea3e..badf944 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -73,7 +73,7 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
+import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.wal.WALKey;
@@ -89,7 +89,7 @@ import com.google.protobuf.Service;
 
 /**
  * Implements the coprocessor environment and runtime support for coprocessors
- * loaded within a {@link HRegion}.
+ * loaded within a {@link Region}.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
@@ -107,7 +107,7 @@ public class RegionCoprocessorHost
   static class RegionEnvironment extends CoprocessorHost.Environment
       implements RegionCoprocessorEnvironment {
 
-    private HRegion region;
+    private Region region;
     private RegionServerServices rsServices;
     ConcurrentMap<String, Object> sharedData;
     private static final int LATENCY_BUFFER_SIZE = 100;
@@ -122,7 +122,7 @@ public class RegionCoprocessorHost
      * @param priority chaining priority
      */
     public RegionEnvironment(final Coprocessor impl, final int priority,
-        final int seq, final Configuration conf, final HRegion region,
+        final int seq, final Configuration conf, final Region region,
         final RegionServerServices services, final ConcurrentMap<String, Object> sharedData) {
       super(impl, priority, seq, conf);
       this.region = region;
@@ -140,7 +140,7 @@ public class RegionCoprocessorHost
 
     /** @return the region */
     @Override
-    public HRegion getRegion() {
+    public Region getRegion() {
       return region;
     }
 
@@ -210,7 +210,7 @@ public class RegionCoprocessorHost
   /** The region server services */
   RegionServerServices rsServices;
   /** The region */
-  HRegion region;
+  Region region;
 
   /**
    * Constructor
@@ -218,7 +218,7 @@ public class RegionCoprocessorHost
    * @param rsServices interface to available region server functionality
    * @param conf the configuration
    */
-  public RegionCoprocessorHost(final HRegion region,
+  public RegionCoprocessorHost(final Region region,
       final RegionServerServices rsServices, final Configuration conf) {
     super(rsServices);
     this.conf = conf;
@@ -707,7 +707,7 @@ public class RegionCoprocessorHost
    * @param r the new right-hand daughter region
    * @throws IOException
    */
-  public void postSplit(final HRegion l, final HRegion r) throws IOException {
+  public void postSplit(final Region l, final Region r) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
       @Override
       public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)

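With getRegion() now typed as Region, observers program against the
supportable interface instead of HRegion. A small illustrative sketch (the
observer class is an assumption, not from this patch):

    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.Region;

    public class InterfaceTypedObserver extends BaseRegionObserver {
      @Override
      public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
        // The environment hands back the Region interface after this change.
        Region region = c.getEnvironment().getRegion();
        String name = region.getRegionInfo().getRegionNameAsString();
        long memstoreBytes = region.getMemstoreSize();
      }
    }
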
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
index cbb8dd8..ec82055 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
@@ -42,10 +42,10 @@ class RegionMergeRequest implements Runnable {
   private final boolean forcible;
   private TableLock tableLock;
 
-  RegionMergeRequest(HRegion a, HRegion b, HRegionServer hrs, boolean forcible) {
+  RegionMergeRequest(Region a, Region b, HRegionServer hrs, boolean forcible) {
     Preconditions.checkNotNull(hrs);
-    this.region_a = a;
-    this.region_b = b;
+    this.region_a = (HRegion)a;
+    this.region_b = (HRegion)b;
     this.server = hrs;
     this.forcible = forcible;
   }
@@ -71,7 +71,8 @@ class RegionMergeRequest implements Runnable {
       //acquire a shared read lock on the table, so that table schema modifications
       //do not happen concurrently
       tableLock = server.getTableLockManager().readLock(region_a.getTableDesc().getTableName()
-          , "MERGE_REGIONS:" + region_a.getRegionNameAsString() + ", " + region_b.getRegionNameAsString());
+          , "MERGE_REGIONS:" + region_a.getRegionInfo().getRegionNameAsString() + ", " +
+              region_b.getRegionInfo().getRegionNameAsString());
       try {
         tableLock.acquire();
       } catch (IOException ex) {
@@ -134,7 +135,7 @@ class RegionMergeRequest implements Runnable {
         LOG.error("Could not release the table lock (something is really wrong). " 
            + "Aborting this server to avoid holding the lock forever.");
         this.server.abort("Abort; we got an error when releasing the table lock "
-                         + "on " + region_a.getRegionNameAsString());
+                         + "on " + region_a.getRegionInfo().getRegionNameAsString());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
index 279514c..de0924b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
@@ -149,14 +149,14 @@ public class RegionMergeTransaction {
    * @param b region b to merge
    * @param forcible if false, we will only merge adjacent regions
    */
-  public RegionMergeTransaction(final HRegion a, final HRegion b,
+  public RegionMergeTransaction(final Region a, final Region b,
       final boolean forcible) {
     if (a.getRegionInfo().compareTo(b.getRegionInfo()) <= 0) {
-      this.region_a = a;
-      this.region_b = b;
+      this.region_a = (HRegion)a;
+      this.region_b = (HRegion)b;
     } else {
-      this.region_a = b;
-      this.region_b = a;
+      this.region_a = (HRegion)b;
+      this.region_b = (HRegion)a;
     }
     this.forcible = forcible;
     this.mergesdir = region_a.getRegionFileSystem().getMergesDir();
@@ -181,8 +181,8 @@ public class RegionMergeTransaction {
     }
     if (!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(),
             region_b.getRegionInfo())) {
-      String msg = "Skip merging " + this.region_a.getRegionNameAsString()
-          + " and " + this.region_b.getRegionNameAsString()
+      String msg = "Skip merging " + this.region_a.getRegionInfo().getRegionNameAsString()
+          + " and " + this.region_b.getRegionInfo().getRegionNameAsString()
           + ", because they are not adjacent.";
       LOG.info(msg);
       return false;
@@ -192,18 +192,19 @@ public class RegionMergeTransaction {
     }
     try {
       boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services,
-          region_a.getRegionName());
+          region_a.getRegionInfo().getRegionName());
       if (regionAHasMergeQualifier ||
-          hasMergeQualifierInMeta(services, region_b.getRegionName())) {
-        LOG.debug("Region " + (regionAHasMergeQualifier ? region_a.getRegionNameAsString()
-                : region_b.getRegionNameAsString())
+          hasMergeQualifierInMeta(services, region_b.getRegionInfo().getRegionName())) {
+        LOG.debug("Region " + (regionAHasMergeQualifier ?
+              region_a.getRegionInfo().getRegionNameAsString() :
+                region_b.getRegionInfo().getRegionNameAsString())
             + " is not mergeable because it has merge qualifier in META");
         return false;
       }
     } catch (IOException e) {
       LOG.warn("Failed judging whether merge transaction is available for "
-              + region_a.getRegionNameAsString() + " and "
-              + region_b.getRegionNameAsString(), e);
+              + region_a.getRegionInfo().getRegionNameAsString() + " and "
+              + region_b.getRegionInfo().getRegionNameAsString(), e);
       return false;
     }
 
@@ -275,7 +276,7 @@ public class RegionMergeTransaction {
   HRegion createMergedRegion(final Server server,
       final RegionServerServices services) throws IOException {
     LOG.info("Starting merge of " + region_a + " and "
-        + region_b.getRegionNameAsString() + ", forcible=" + forcible);
+        + region_b.getRegionInfo().getRegionNameAsString() + ", forcible=" + forcible);
     if ((server != null && server.isStopped())
         || (services != null && services.isStopping())) {
       throw new IOException("Server is stopped or stopping");
@@ -584,7 +585,7 @@ public class RegionMergeTransaction {
     boolean stopped = server != null && server.isStopped();
     boolean stopping = services != null && services.isStopping();
     if (stopped || stopping) {
-      LOG.info("Not opening merged region  " + merged.getRegionNameAsString()
+      LOG.info("Not opening merged region  " + merged.getRegionInfo().getRegionNameAsString()
           + " because stopping=" + stopping + ", stopped=" + stopped);
       return;
     }
@@ -695,7 +696,7 @@ public class RegionMergeTransaction {
             this.region_a.initialize();
           } catch (IOException e) {
             LOG.error("Failed rollbacking CLOSED_REGION_A of region "
-                + this.region_a.getRegionNameAsString(), e);
+                + this.region_a.getRegionInfo().getRegionNameAsString(), e);
             throw new RuntimeException(e);
           }
           break;
@@ -710,7 +711,7 @@ public class RegionMergeTransaction {
             this.region_b.initialize();
           } catch (IOException e) {
             LOG.error("Failed rollbacking CLOSED_REGION_A of region "
-                + this.region_b.getRegionNameAsString(), e);
+                + this.region_b.getRegionInfo().getRegionNameAsString(), e);
             throw new RuntimeException(e);
           }
           break;

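The log-message churn in this file (and in RegionMergeRequest above) is one
mechanical migration: HRegion exposed getRegionNameAsString() directly, while
the Region interface reaches the name through getRegionInfo(). A minimal
sketch of the pattern (helper names illustrative):

    import org.apache.hadoop.hbase.regionserver.Region;

    class RegionNames {
      // before: region.getRegionNameAsString()
      // after:  region.getRegionInfo().getRegionNameAsString()
      static String nameOf(Region region) {
        return region.getRegionInfo().getRegionNameAsString();
      }
    }
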
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index cf95528..aba09ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -23,8 +23,10 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.master.TableLockManager;
@@ -37,9 +39,9 @@ import com.google.protobuf.Service;
 /**
  * Services provided by {@link HRegionServer}
  */
-@InterfaceAudience.Private
-public interface RegionServerServices
-    extends OnlineRegions, FavoredNodesForRegion {
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
+public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegion {
   /**
    * @return True if this regionserver is stopping.
    */
@@ -77,8 +79,7 @@ public interface RegionServerServices
    * @throws KeeperException
    * @throws IOException
    */
-  void postOpenDeployTasks(final HRegion r)
-  throws KeeperException, IOException;
+  void postOpenDeployTasks(final Region r) throws KeeperException, IOException;
 
   /**
    * Notify master that a handler requests to change a region state
@@ -119,7 +120,7 @@ public interface RegionServerServices
   /**
    * @return set of recovering regions on the hosting region server
    */
-  Map<String, HRegion> getRecoveringRegions();
+  Map<String, Region> getRecoveringRegions();
 
   /**
    * Only required for "old" log replay; if it's removed, remove this.

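RegionServerServices is likewise opened to coprocessors (LimitedPrivate COPROC)
and re-typed against Region. For instance, a lookup through the inherited
OnlineRegions method now yields the interface, as the CloseRegionHandler hunk
further down also shows; a hedged sketch (helper names illustrative):

    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;

    class OnlineRegionLookup {
      // getFromOnlineRegions (from OnlineRegions) returns Region after this
      // change; internal callers that need HRegion must downcast explicitly.
      static Region lookup(RegionServerServices rss, String encodedName) {
        return rss.getFromOnlineRegions(encodedName);
      }
    }
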
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index 77611da..22bdccb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.util.Map;
+import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -74,11 +74,11 @@ public abstract class RegionSplitPolicy extends Configured {
     if (explicitSplitPoint != null) {
       return explicitSplitPoint;
     }
-    Map<byte[], Store> stores = region.getStores();
+    List<Store> stores = region.getStores();
 
     byte[] splitPointFromLargestStore = null;
     long largestStoreSize = 0;
-    for (Store s : stores.values()) {
+    for (Store s : stores) {
       byte[] splitPoint = s.getSplitPoint();
       long storeSize = s.getSize();
       if (splitPoint != null && largestStoreSize < storeSize) {

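The change here reflects the new Region#getStores() signature: a List<Store>
rather than a Map keyed by family name. Callers iterate the list directly and
use getStore(family) for point lookups; a small sketch under that assumption:

    import java.util.List;

    import org.apache.hadoop.hbase.regionserver.Store;

    class StoreIteration {
      // before: for (Store s : region.getStores().values()) { ... }
      // after:  for (Store s : region.getStores()) { ... }
      static long totalStoreSize(List<Store> stores) {
        long total = 0;
        for (Store s : stores) {
          total += s.getSize();
        }
        return total;
      }
    }
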
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
index 928a9f6..ed972ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
@@ -42,9 +42,9 @@ class SplitRequest implements Runnable {
   private final HRegionServer server;
   private TableLock tableLock;
 
-  SplitRequest(HRegion region, byte[] midKey, HRegionServer hrs) {
+  SplitRequest(Region region, byte[] midKey, HRegionServer hrs) {
     Preconditions.checkNotNull(hrs);
-    this.parent = region;
+    this.parent = (HRegion)region;
     this.midKey = midKey;
     this.server = hrs;
   }
@@ -69,7 +69,7 @@ class SplitRequest implements Runnable {
       //acquire a shared read lock on the table, so that table schema modifications
       //do not happen concurrently
       tableLock = server.getTableLockManager().readLock(parent.getTableDesc().getTableName()
-          , "SPLIT_REGION:" + parent.getRegionNameAsString());
+          , "SPLIT_REGION:" + parent.getRegionInfo().getRegionNameAsString());
       try {
         tableLock.acquire();
       } catch (IOException ex) {
@@ -87,22 +87,22 @@ class SplitRequest implements Runnable {
         if (this.server.isStopping() || this.server.isStopped()) {
           LOG.info(
               "Skip rollback/cleanup of failed split of "
-                  + parent.getRegionNameAsString() + " because server is"
+                  + parent.getRegionInfo().getRegionNameAsString() + " because server is"
                   + (this.server.isStopping() ? " stopping" : " stopped"), e);
           return;
         }
         try {
           LOG.info("Running rollback/cleanup of failed split of " +
-            parent.getRegionNameAsString() + "; " + e.getMessage(), e);
+            parent.getRegionInfo().getRegionNameAsString() + "; " + e.getMessage(), e);
           if (st.rollback(this.server, this.server)) {
             LOG.info("Successful rollback of failed split of " +
-              parent.getRegionNameAsString());
+              parent.getRegionInfo().getRegionNameAsString());
           } else {
             this.server.abort("Abort; we got an error after point-of-no-return");
           }
         } catch (RuntimeException ee) {
           String msg = "Failed rollback of failed split of " +
-            parent.getRegionNameAsString() + " -- aborting server";
+            parent.getRegionInfo().getRegionNameAsString() + " -- aborting server";
           // If failed rollback, kill this server to avoid having a hole in table.
           LOG.info(msg, ee);
           this.server.abort(msg + " -- Cause: " + ee.getMessage());
@@ -132,7 +132,7 @@ class SplitRequest implements Runnable {
         server.metricsRegionServer.incrSplitSuccess();
         // Log success
         LOG.info("Region split, hbase:meta updated, and report to master. Parent="
-            + parent.getRegionNameAsString() + ", new regions: "
+            + parent.getRegionInfo().getRegionNameAsString() + ", new regions: "
             + st.getFirstDaughter().getRegionNameAsString() + ", "
             + st.getSecondDaughter().getRegionNameAsString() + ". Split took "
             + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime));
@@ -150,7 +150,7 @@ class SplitRequest implements Runnable {
         LOG.error("Could not release the table lock (something is really wrong). " 
            + "Aborting this server to avoid holding the lock forever.");
         this.server.abort("Abort; we got an error when releasing the table lock "
-                         + "on " + parent.getRegionNameAsString());
+                         + "on " + parent.getRegionInfo().getRegionNameAsString());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index 5f4bd43..c3bcfec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -204,11 +204,11 @@ public class SplitTransaction {
    * @param r Region to split
    * @param splitrow Row to split around
    */
-  public SplitTransaction(final HRegion r, final byte [] splitrow) {
-    this.parent = r;
+  public SplitTransaction(final Region r, final byte [] splitrow) {
+    this.parent = (HRegion)r;
     this.splitrow = splitrow;
     this.journal.add(new JournalEntry(JournalEntryType.STARTED));
-    useZKForAssignment = ConfigUtil.useZKForAssignment(r.getBaseConf());
+    useZKForAssignment = ConfigUtil.useZKForAssignment(parent.getBaseConf());
   }
 
   /**
@@ -267,7 +267,7 @@ public class SplitTransaction {
    *    Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    */
-  /* package */PairOfSameType<HRegion> createDaughters(final Server server,
+  /* package */PairOfSameType<Region> createDaughters(final Server server,
       final RegionServerServices services) throws IOException {
     LOG.info("Starting split of region " + this.parent);
     if ((server != null && server.isStopped()) ||
@@ -295,14 +295,14 @@ public class SplitTransaction {
         server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
           this.fileSplitTimeout);
 
-    PairOfSameType<HRegion> daughterRegions = stepsBeforePONR(server, services, testing);
+    PairOfSameType<Region> daughterRegions = stepsBeforePONR(server, services, testing);
 
     List<Mutation> metaEntries = new ArrayList<Mutation>();
     if (this.parent.getCoprocessorHost() != null) {
       if (this.parent.getCoprocessorHost().
           preSplitBeforePONR(this.splitrow, metaEntries)) {
         throw new IOException("Coprocessor bypassing region "
-            + this.parent.getRegionNameAsString() + " split.");
+            + this.parent.getRegionInfo().getRegionNameAsString() + " split.");
       }
       try {
         for (Mutation p : metaEntries) {
@@ -360,7 +360,7 @@ public class SplitTransaction {
     return daughterRegions;
   }
 
-  public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
+  public PairOfSameType<Region> stepsBeforePONR(final Server server,
       final RegionServerServices services, boolean testing) throws IOException {
 
     if (useCoordinatedStateManager(server)) {
@@ -376,7 +376,7 @@ public class SplitTransaction {
       if (!services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT,
           parent.getRegionInfo(), hri_a, hri_b)) {
         throw new IOException("Failed to get ok from master to split "
-          + parent.getRegionNameAsString());
+          + parent.getRegionInfo().getRegionNameAsString());
       }
     }
     this.journal.add(new JournalEntry(JournalEntryType.SET_SPLITTING));
@@ -431,7 +431,7 @@ public class SplitTransaction {
     this.journal.add(new JournalEntry(JournalEntryType.STARTED_REGION_A_CREATION));
     assertReferenceFileCount(expectedReferences.getFirst(),
         this.parent.getRegionFileSystem().getSplitsDir(this.hri_a));
-    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
+    Region a = this.parent.createDaughterRegionFromSplits(this.hri_a);
     assertReferenceFileCount(expectedReferences.getFirst(),
         new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName()));
 
@@ -439,11 +439,11 @@ public class SplitTransaction {
     this.journal.add(new JournalEntry(JournalEntryType.STARTED_REGION_B_CREATION));
     assertReferenceFileCount(expectedReferences.getSecond(),
         this.parent.getRegionFileSystem().getSplitsDir(this.hri_b));
-    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
+    Region b = this.parent.createDaughterRegionFromSplits(this.hri_b);
     assertReferenceFileCount(expectedReferences.getSecond(),
         new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName()));
 
-    return new PairOfSameType<HRegion>(a, b);
+    return new PairOfSameType<Region>(a, b);
   }
 
   void assertReferenceFileCount(int expectedReferenceFileCount, Path dir)
@@ -464,7 +464,7 @@ public class SplitTransaction {
    *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void openDaughters(final Server server,
-      final RegionServerServices services, HRegion a, HRegion b)
+      final RegionServerServices services, Region a, Region b)
       throws IOException {
     boolean stopped = server != null && server.isStopped();
     boolean stopping = services != null && services.isStopping();
@@ -477,8 +477,8 @@ public class SplitTransaction {
           " because stopping=" + stopping + ", stopped=" + stopped);
     } else {
       // Open daughters in parallel.
-      DaughterOpener aOpener = new DaughterOpener(server, a);
-      DaughterOpener bOpener = new DaughterOpener(server, b);
+      DaughterOpener aOpener = new DaughterOpener(server, (HRegion)a);
+      DaughterOpener bOpener = new DaughterOpener(server, (HRegion)b);
       aOpener.start();
       bOpener.start();
       try {
@@ -534,7 +534,7 @@ public class SplitTransaction {
    * @throws IOException
    * @see #rollback(Server, RegionServerServices)
    */
-  public PairOfSameType<HRegion> execute(final Server server,
+  public PairOfSameType<Region> execute(final Server server,
       final RegionServerServices services)
   throws IOException {
     useZKForAssignment = server == null ? true :
@@ -544,15 +544,15 @@ public class SplitTransaction {
           ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
               .getSplitTransactionCoordination().getDefaultDetails();
     }
-    PairOfSameType<HRegion> regions = createDaughters(server, services);
+    PairOfSameType<Region> regions = createDaughters(server, services);
     if (this.parent.getCoprocessorHost() != null) {
       this.parent.getCoprocessorHost().preSplitAfterPONR();
     }
     return stepsAfterPONR(server, services, regions);
   }
 
-  public PairOfSameType<HRegion> stepsAfterPONR(final Server server,
-      final RegionServerServices services, PairOfSameType<HRegion> regions)
+  public PairOfSameType<Region> stepsAfterPONR(final Server server,
+      final RegionServerServices services, PairOfSameType<Region> regions)
       throws IOException {
     openDaughters(server, services, regions.getFirst(), regions.getSecond());
     if (useCoordinatedStateManager(server)) {
@@ -857,7 +857,7 @@ public class SplitTransaction {
           this.parent.initialize();
         } catch (IOException e) {
           LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " +
-            this.parent.getRegionNameAsString(), e);
+            this.parent.getRegionInfo().getRegionNameAsString(), e);
           throw new RuntimeException(e);
         }
         break;

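SplitTransaction now accepts and returns the interface type. A rough sketch of
how a caller drives the transaction, assuming the usual boolean-returning
prepare() and the rollback(Server, RegionServerServices) referenced in the
javadoc above (names and error handling here are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;
    import org.apache.hadoop.hbase.regionserver.SplitTransaction;
    import org.apache.hadoop.hbase.util.PairOfSameType;

    class SplitSketch {
      static void split(Region parent, byte[] splitRow, Server server,
          RegionServerServices services) throws IOException {
        SplitTransaction st = new SplitTransaction(parent, splitRow);
        if (!st.prepare()) {
          return;  // region is not splittable at this row
        }
        try {
          PairOfSameType<Region> daughters = st.execute(server, services);
        } catch (IOException e) {
          // A failed execute must be rolled back; a failed rollback would
          // force a server abort to avoid leaving a half-split region.
          st.rollback(server, services);
          throw e;
        }
      }
    }
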
http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index b638a8f..a77fc0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -48,7 +49,7 @@ import org.apache.hadoop.hbase.util.Pair;
  * Interface for objects that hold a column family in a Region. Its a memstore and a set of zero or
  * more StoreFiles, which stretch backwards over time.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
 public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver {
 
@@ -63,7 +64,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   Collection<StoreFile> getStorefiles();
 
   /**
-   * Close all the readers We don't need to worry about subsequent requests because the HRegion
+   * Close all the readers We don't need to worry about subsequent requests because the Region
    * holds a write lock that will prevent any more reads or writes.
    * @return the {@link StoreFile StoreFiles} that were previously being used.
    * @throws IOException on failure
@@ -241,7 +242,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   void assertBulkLoadHFileOk(Path srcPath) throws IOException;
 
   /**
-   * This method should only be called from HRegion. It is assumed that the ranges of values in the
+   * This method should only be called from Region. It is assumed that the ranges of values in the
    * HFile fit within the stores assigned region. (assertBulkLoadHFileOk checks this)
    *
    * @param srcPathStr

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
index 4918391..a2a0dcc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
@@ -84,8 +84,8 @@ public class StorefileRefresherChore extends ScheduledChore {
 
   @Override
   protected void chore() {
-    for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
-      if (!r.writestate.isReadOnly()) {
+    for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+      if (!r.isReadOnly()) {
         // skip checking for this region if it can accept writes
         continue;
       }
@@ -98,7 +98,7 @@ public class StorefileRefresherChore extends ScheduledChore {
         lastRefreshTimes.put(encodedName, time);
       }
       try {
-        for (Store store : r.getStores().values()) {
+        for (Store store : r.getStores()) {
           // TODO: some stores might see new data from flush, while others do not which
           // MIGHT break atomic edits across column families. We can fix this with setting
           // mvcc read numbers that we know every store has seen
@@ -110,12 +110,12 @@ public class StorefileRefresherChore extends ScheduledChore {
 
         // Store files have a TTL in the archive directory. If we fail to refresh for that long, we stop serving reads
         if (isRegionStale(encodedName, time)) {
-          r.setReadsEnabled(false); // stop serving reads
+          ((HRegion)r).setReadsEnabled(false); // stop serving reads
         }
         continue;
       }
       lastRefreshTimes.put(encodedName, time);
-      r.setReadsEnabled(true); // restart serving reads
+      ((HRegion)r).setReadsEnabled(true); // restart serving reads
     }
 
     // remove closed regions
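
The two casts back to HRegion above mark the boundary this patch draws: Region carries the supportable surface, while internal mutators such as setReadsEnabled (and setRecovering, further down) stay on HRegion. A hedged sketch of the idiom, with a hypothetical helper class:

    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.Region;

    final class ReadsToggle {
      private ReadsToggle() {}

      // setReadsEnabled is not part of the Region interface; framework code
      // that owns the region downcasts to HRegion to reach it, as the chore
      // above does.
      static void setReadsEnabled(Region r, boolean enabled) {
        ((HRegion) r).setReadsEnabled(enabled);
      }
    }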

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
index 9e7786f..ccb0e72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
@@ -119,7 +119,7 @@ public class CloseRegionHandler extends EventHandler {
       LOG.debug("Processing close of " + name);
       String encodedRegionName = regionInfo.getEncodedName();
       // Check that this region is being served here
-      HRegion region = this.rsServices.getFromOnlineRegions(encodedRegionName);
+      HRegion region = (HRegion)rsServices.getFromOnlineRegions(encodedRegionName);
       if (region == null) {
         LOG.warn("Received CLOSE for region " + name + " but currently not serving - ignoring");
         // TODO: do better than a simple warning
@@ -161,7 +161,7 @@ public class CloseRegionHandler extends EventHandler {
       }
 
       // Done!  Region is closed on this RS
-      LOG.debug("Closed " + region.getRegionNameAsString());
+      LOG.debug("Closed " + region.getRegionInfo().getRegionNameAsString());
     } finally {
       this.rsServices.getRegionsInTransitionInRS().
           remove(this.regionInfo.getEncodedNameAsBytes());
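
Note the changed debug line: the region name is no longer read off the region itself but through HRegionInfo. The same substitution recurs in WriteSinkCoprocessor and HMerge below; a one-method sketch of the pattern, with an illustrative helper name:

    import org.apache.hadoop.hbase.regionserver.Region;

    final class RegionNames {
      private RegionNames() {}

      // The name accessors live on HRegionInfo; Region exposes the info
      // object via getRegionInfo().
      static String nameOf(Region region) {
        return region.getRegionInfo().getRegionNameAsString();
      }
    }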

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java
index 2ff3454..19838d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
 public class FinishRegionRecoveringHandler extends EventHandler {
@@ -45,9 +46,9 @@ public class FinishRegionRecoveringHandler extends EventHandler {
 
   @Override
   public void process() throws IOException {
-    HRegion region = this.rss.getRecoveringRegions().remove(regionName);
+    Region region = this.rss.getRecoveringRegions().remove(regionName);
     if (region != null) {
-      region.setRecovering(false);
+      ((HRegion)region).setRecovering(false);
       LOG.info(path + " deleted; " + regionName + " recovered.");
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
index 611e432..998c1fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.procedure.ProcedureMember;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 
@@ -45,14 +47,14 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 public class FlushSnapshotSubprocedure extends Subprocedure {
   private static final Log LOG = LogFactory.getLog(FlushSnapshotSubprocedure.class);
 
-  private final List<HRegion> regions;
+  private final List<Region> regions;
   private final SnapshotDescription snapshot;
   private final SnapshotSubprocedurePool taskManager;
   private boolean snapshotSkipFlush = false;
 
   public FlushSnapshotSubprocedure(ProcedureMember member,
       ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout,
-      List<HRegion> regions, SnapshotDescription snapshot,
+      List<Region> regions, SnapshotDescription snapshot,
       SnapshotSubprocedurePool taskManager) {
     super(member, snapshot.getName(), errorListener, wakeFrequency, timeout);
     this.snapshot = snapshot;
@@ -68,8 +70,8 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
    * Callable for adding files to snapshot manifest working dir.  Ready for multithreading.
    */
   private class RegionSnapshotTask implements Callable<Void> {
-    HRegion region;
-    RegionSnapshotTask(HRegion region) {
+    Region region;
+    RegionSnapshotTask(Region region) {
       this.region = region;
     }
 
@@ -94,9 +96,9 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
           LOG.debug("take snapshot without flush memstore first");
         } else {
           LOG.debug("Flush Snapshotting region " + region.toString() + " started...");
-          region.flushcache();
+          region.flush(true);
         }
-        region.addRegionToSnapshot(snapshot, monitor);
+        ((HRegion)region).addRegionToSnapshot(snapshot, monitor);
         if (snapshotSkipFlush) {
           LOG.debug("... SkipFlush Snapshotting region " + region.toString() + " completed.");
         } else {
@@ -126,7 +128,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
     }
 
     // Add all hfiles already existing in region.
-    for (HRegion region : regions) {
+    for (Region region : regions) {
      // submit one task per region to parallelize by region.
       taskManager.submitTask(new RegionSnapshotTask(region));
       monitor.rethrowException();
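
RegionSnapshotTask shows two adaptations: flushcache() becomes flush(true) on the Region interface, while addRegionToSnapshot remains HRegion-only and still needs a cast. A minimal sketch of the renamed flush call; the wrapper class is hypothetical:

    import java.io.IOException;

    import org.apache.hadoop.hbase.regionserver.Region;

    final class SnapshotFlush {
      private SnapshotFlush() {}

      // flush(true) forces a memstore flush through the public interface,
      // replacing the HRegion-only flushcache() used before this patch.
      static void flushForSnapshot(Region region) throws IOException {
        region.flush(true);
      }
    }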

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 93d836d..021c16f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -49,8 +49,8 @@ import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
 import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -160,7 +160,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
 
     // check to see if this server is hosting any regions for the snapshots
     // check to see if we have regions for the snapshot
-    List<HRegion> involvedRegions;
+    List<Region> involvedRegions;
     try {
       involvedRegions = getRegionsToSnapshot(snapshot);
     } catch (IOException e1) {
@@ -220,12 +220,12 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
    *         the given snapshot.
    * @throws IOException
    */
-  private List<HRegion> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
-    List<HRegion> onlineRegions = rss.getOnlineRegions(TableName.valueOf(snapshot.getTable()));
-    Iterator<HRegion> iterator = onlineRegions.iterator();
+  private List<Region> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
+    List<Region> onlineRegions = rss.getOnlineRegions(TableName.valueOf(snapshot.getTable()));
+    Iterator<Region> iterator = onlineRegions.iterator();
     // remove the non-default regions
     while (iterator.hasNext()) {
-      HRegion r = iterator.next();
+      Region r = iterator.next();
       if (!RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) {
         iterator.remove();
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index b918354..a01f8a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -61,9 +61,9 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -328,7 +328,7 @@ public class AccessControlLists {
    * Returns {@code true} if the given region is part of the {@code _acl_}
    * metadata table.
    */
-  static boolean isAclRegion(HRegion region) {
+  static boolean isAclRegion(Region region) {
     return ACL_TABLE_NAME.equals(region.getTableDesc().getTableName());
   }
 
@@ -347,8 +347,7 @@ public class AccessControlLists {
    * @return a map of the permissions for this table.
    * @throws IOException
    */
-  static Map<byte[], ListMultimap<String,TablePermission>> loadAll(
-      HRegion aclRegion)
+  static Map<byte[], ListMultimap<String,TablePermission>> loadAll(Region aclRegion)
     throws IOException {
 
     if (!isAclRegion(aclRegion)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index fc349f1..d72eef7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -85,10 +85,10 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -197,7 +197,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   // This boolean having relevance only in the Master.
   private volatile boolean aclTabAvailable = false;
 
-  public HRegion getRegion() {
+  public Region getRegion() {
     return regionEnv != null ? regionEnv.getRegion() : null;
   }
 
@@ -206,7 +206,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   }
 
   void initialize(RegionCoprocessorEnvironment e) throws IOException {
-    final HRegion region = e.getRegion();
+    final Region region = e.getRegion();
     Configuration conf = e.getConfiguration();
     Map<byte[], ListMultimap<String,TablePermission>> tables =
         AccessControlLists.loadAll(region);
@@ -1325,7 +1325,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e)
       throws IOException {
     RegionCoprocessorEnvironment env = e.getEnvironment();
-    final HRegion region = env.getRegion();
+    final Region region = env.getRegion();
     if (region == null) {
       LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
     } else {
@@ -1341,7 +1341,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   @Override
   public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
     RegionCoprocessorEnvironment env = c.getEnvironment();
-    final HRegion region = env.getRegion();
+    final Region region = env.getRegion();
     if (region == null) {
       LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()");
       return;
@@ -1445,7 +1445,7 @@ public class AccessController extends BaseMasterAndRegionObserver
       throw new RuntimeException("Unhandled operation " + opType);
     }
     AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ);
-    HRegion region = getRegion(env);
+    Region region = getRegion(env);
     TableName table = getTableName(region);
     Map<ByteRange, Integer> cfVsMaxVersions = Maps.newHashMap();
     for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
@@ -2290,19 +2290,19 @@ public class AccessController extends BaseMasterAndRegionObserver
     return AccessControlProtos.AccessControlService.newReflectiveService(this);
   }
 
-  private HRegion getRegion(RegionCoprocessorEnvironment e) {
+  private Region getRegion(RegionCoprocessorEnvironment e) {
     return e.getRegion();
   }
 
   private TableName getTableName(RegionCoprocessorEnvironment e) {
-    HRegion region = e.getRegion();
+    Region region = e.getRegion();
     if (region != null) {
       return getTableName(region);
     }
     return null;
   }
 
-  private TableName getTableName(HRegion region) {
+  private TableName getTableName(Region region) {
     HRegionInfo regionInfo = region.getRegionInfo();
     if (regionInfo != null) {
       return regionInfo.getTable();
@@ -2407,31 +2407,31 @@ public class AccessController extends BaseMasterAndRegionObserver
   }
 
   @Override
-  public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, HRegion regionA,
-      HRegion regionB) throws IOException {
+  public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
+      Region regionB) throws IOException {
     requirePermission("mergeRegions", regionA.getTableDesc().getTableName(), null, null,
       Action.ADMIN);
   }
 
   @Override
-  public void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, HRegion regionA,
-      HRegion regionB, HRegion mergedRegion) throws IOException { }
+  public void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
+      Region regionB, Region mergedRegion) throws IOException { }
 
   @Override
   public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-      HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException { }
+      Region regionA, Region regionB, List<Mutation> metaEntries) throws IOException { }
 
   @Override
   public void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-      HRegion regionA, HRegion regionB, HRegion mergedRegion) throws IOException { }
+      Region regionA, Region regionB, Region mergedRegion) throws IOException { }
 
   @Override
   public void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-      HRegion regionA, HRegion regionB) throws IOException { }
+      Region regionA, Region regionB) throws IOException { }
 
   @Override
   public void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-      HRegion regionA, HRegion regionB) throws IOException { }
+      Region regionA, Region regionB) throws IOException { }
 
   @Override
   public void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
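
The region-server observer hooks now traffic in Region too, so third-party observers compile against the stable interface rather than HRegion. A hedged sketch using the new preMerge signature; MergeAuditObserver is hypothetical:

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.Region;

    public class MergeAuditObserver extends BaseRegionServerObserver {
      private static final Log LOG = LogFactory.getLog(MergeAuditObserver.class);

      @Override
      public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
          Region regionA, Region regionB) throws IOException {
        // Both arguments arrive as Region; name lookups go through HRegionInfo.
        LOG.info("about to merge " + regionA.getRegionInfo().getRegionNameAsString() +
            " and " + regionB.getRegionInfo().getRegionNameAsString());
      }
    }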

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
index e417417..7b3124e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.security.access;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -49,7 +50,8 @@ import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBu
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesResponse;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener;
 import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -235,7 +237,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
       return;
     }
 
-    HRegion region = env.getRegion();
+    Region region = env.getRegion();
     boolean bypass = false;
     if (region.getCoprocessorHost() != null) {
       try {
@@ -352,7 +354,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
     return this;
   }
 
-  private static class SecureBulkLoadListener implements HRegion.BulkLoadListener {
+  private static class SecureBulkLoadListener implements BulkLoadListener {
     // Target filesystem
     private FileSystem fs;
     private String stagingDir;
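
BulkLoadListener now hangs off Region rather than HRegion, so implementations reference only the interface. A sketch of a minimal pass-through listener, assuming the three-method shape of the listener on this branch (prepareBulkLoad, doneBulkLoad, failedBulkLoad):

    import java.io.IOException;

    import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener;

    final class NoopBulkLoadListener implements BulkLoadListener {
      @Override
      public String prepareBulkLoad(byte[] family, String srcPath) throws IOException {
        return srcPath;  // load the file from where it already sits
      }

      @Override
      public void doneBulkLoad(byte[] family, String srcPath) throws IOException {
        // nothing staged, nothing to clean up
      }

      @Override
      public void failedBulkLoad(byte[] family, String srcPath) throws IOException {
        // nothing staged, nothing to roll back
      }
    }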

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index 5b05d23..7c0bb53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
@@ -55,8 +56,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.util.StreamUtils;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
@@ -76,7 +77,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
 
   private AtomicInteger ordinalCounter = new AtomicInteger(-1);
   private Configuration conf;
-  private HRegion labelsRegion;
+  private Region labelsRegion;
   private VisibilityLabelsCache labelsCache;
   private List<ScanLabelGenerator> scanLabelGenerators;
   private List<String> superUsers;
@@ -196,7 +197,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
     return new Pair<Map<String, Integer>, Map<String, List<Integer>>>(labels, userAuths);
   }
 
-  protected void addSystemLabel(HRegion region, Map<String, Integer> labels,
+  protected void addSystemLabel(Region region, Map<String, Integer> labels,
       Map<String, List<Integer>> userAuths) throws IOException {
     if (!labels.containsKey(SYSTEM_LABEL)) {
       Put p = new Put(Bytes.toBytes(SYSTEM_LABEL_ORDINAL));
@@ -307,7 +308,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
   private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus)
       throws IOException {
     OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations
-        .toArray(new Mutation[mutations.size()]));
+        .toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
     int i = 0;
     boolean updateZk = false;
     for (OperationStatus status : opStatus) {
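
The interface's batchMutate takes explicit nonce arguments, so callers with nothing to propagate pass HConstants.NO_NONCE twice, as the call above now does. A small sketch of the call shape; the helper class is hypothetical:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.regionserver.OperationStatus;
    import org.apache.hadoop.hbase.regionserver.Region;

    final class Mutations {
      private Mutations() {}

      static OperationStatus[] applyAll(Region region, List<Mutation> mutations)
          throws IOException {
        // NO_NONCE in both positions means "no client-supplied nonce group/nonce".
        return region.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
            HConstants.NO_NONCE, HConstants.NO_NONCE);
      }
    }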

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index d219ed2..62efd64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -90,10 +90,10 @@ import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.Visibil
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -514,7 +514,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
     if (!initialized) {
       throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
     }
-    HRegion region = e.getEnvironment().getRegion();
+    Region region = e.getEnvironment().getRegion();
     Authorizations authorizations = null;
     try {
       authorizations = scan.getAuthorizations();
@@ -548,7 +548,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
   public DeleteTracker postInstantiateDeleteTracker(
       ObserverContext<RegionCoprocessorEnvironment> ctx, DeleteTracker delTracker)
       throws IOException {
-    HRegion region = ctx.getEnvironment().getRegion();
+    Region region = ctx.getEnvironment().getRegion();
     TableName table = region.getRegionInfo().getTable();
     if (table.isSystemTable()) {
       return delTracker;
@@ -612,7 +612,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
     if (!initialized) {
       throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
     }
-    HRegion region = e.getEnvironment().getRegion();
+    Region region = e.getEnvironment().getRegion();
     Authorizations authorizations = null;
     try {
       authorizations = get.getAuthorizations();

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
index ebff5ff..d495b69 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUs
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations;
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel;
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
@@ -308,7 +308,7 @@ public class VisibilityUtils {
     return false;
   }
 
-  public static Filter createVisibilityLabelFilter(HRegion region, Authorizations authorizations)
+  public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations)
       throws IOException {
     Map<ByteRange, Integer> cfVsMaxVersions = new HashMap<ByteRange, Integer>();
     for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 38ccf08..007dbfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -166,7 +166,7 @@ public class SnapshotManifest {
     // 2. iterate through all the stores in the region
     LOG.debug("Creating references for hfiles");
 
-    for (Store store : region.getStores().values()) {
+    for (Store store : region.getStores()) {
       // 2.1. build the snapshot reference for the store
       Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
       monitor.rethrowException();

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
index fff1374..92ab4d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
@@ -60,7 +60,7 @@ public class WriteSinkCoprocessor extends BaseRegionObserver {
 
   @Override
   public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
-    regionName = e.getEnvironment().getRegion().getRegionNameAsString();
+    regionName = e.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString();
   }
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/af171593/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 2e0f53d..0642db1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -204,16 +204,17 @@ class HMerge {
         if ((currentSize + nextSize) <= (maxFilesize / 2)) {
           // We merge two adjacent regions if their total size is less than
           // one half of the desired maximum size
-          LOG.info("Merging regions " + currentRegion.getRegionNameAsString() +
-            " and " + nextRegion.getRegionNameAsString());
+          LOG.info("Merging regions " + currentRegion.getRegionInfo().getRegionNameAsString() +
+            " and " + nextRegion.getRegionInfo().getRegionNameAsString());
           HRegion mergedRegion =
             HRegion.mergeAdjacent(currentRegion, nextRegion);
-          updateMeta(currentRegion.getRegionName(), nextRegion.getRegionName(),
-              mergedRegion);
+          updateMeta(currentRegion.getRegionInfo().getRegionName(),
+            nextRegion.getRegionInfo().getRegionName(), mergedRegion);
           break;
         }
-        LOG.info("not merging regions " + Bytes.toStringBinary(currentRegion.getRegionName())
-            + " and " + Bytes.toStringBinary(nextRegion.getRegionName()));
+        LOG.info("not merging regions " +
+          Bytes.toStringBinary(currentRegion.getRegionInfo().getRegionName()) +
+            " and " + Bytes.toStringBinary(nextRegion.getRegionInfo().getRegionName()));
         currentRegion.close();
         currentRegion = nextRegion;
         currentSize = nextSize;
@@ -339,7 +340,7 @@ class HMerge {
 
       if(LOG.isDebugEnabled()) {
         LOG.debug("updated columns in row: "
-            + Bytes.toStringBinary(newRegion.getRegionName()));
+            + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName()));
       }
     }
   }

