hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From zjus...@apache.org
Subject svn commit: r1460306 [4/5] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java/org/apac...
Date Sun, 24 Mar 2013 10:26:22 GMT
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto Sun Mar 24 10:26:21 2013
@@ -141,6 +141,20 @@ message CompactRegionRequest {
 message CompactRegionResponse {
 }
 
+/**
+ * Merges the specified regions.
+ * <p>
+ * This method currently closes the regions and then merges them
+ */
+message MergeRegionsRequest {
+  required RegionSpecifier regionA = 1;
+  required RegionSpecifier regionB = 2;
+  optional bool forcible = 3 [default = false];
+}
+
+message MergeRegionsResponse {
+}
+
 message UUID {
   required uint64 leastSigBits = 1;
   required uint64 mostSigBits = 2;
@@ -240,6 +254,9 @@ service AdminService {
 
   rpc compactRegion(CompactRegionRequest)
     returns(CompactRegionResponse);
+    
+  rpc mergeRegions(MergeRegionsRequest)
+    returns(MergeRegionsResponse);
 
   rpc replicateWALEntry(ReplicateWALEntryRequest)
     returns(ReplicateWALEntryResponse);

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto Sun Mar 24 10:26:21 2013
@@ -63,6 +63,18 @@ message MoveRegionRequest {
 message MoveRegionResponse {
 }
 
+/**
+ * Dispatch merging the specified regions.
+ */
+message DispatchMergingRegionsRequest {
+  required RegionSpecifier regionA = 1;
+  required RegionSpecifier regionB = 2;
+  optional bool forcible = 3 [default = false];
+}
+
+message DispatchMergingRegionsResponse {
+}
+
 message AssignRegionRequest {
   required RegionSpecifier region = 1;
 }
@@ -243,6 +255,10 @@ service MasterAdminService {
   rpc moveRegion(MoveRegionRequest)
     returns(MoveRegionResponse);
 
+  /** Master dispatches a merge of the specified regions */
+  rpc dispatchMergingRegions(DispatchMergingRegionsRequest)
+    returns(DispatchMergingRegionsResponse);    
+
   /** Assign a region to a server chosen at random. */
   rpc assignRegion(AssignRegionRequest)
     returns(AssignRegionResponse);

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java Sun Mar 24 10:26:21 2013
@@ -28,12 +28,13 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
@@ -66,6 +67,18 @@ public class MetaEditor {
   }
 
   /**
+   * Generates and returns a Delete containing the region info for the catalog
+   * table
+   */
+  public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
+    if (regionInfo == null) {
+      throw new IllegalArgumentException("Can't make a delete for null region");
+    }
+    Delete delete = new Delete(regionInfo.getRegionName());
+    return delete;
+  }
+
+  /**
    * Adds split daughters to the Put
    */
   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
@@ -261,6 +274,42 @@ public class MetaEditor {
   }
 
   /**
+   * Merge the two regions into one in an atomic operation. Deletes the two
+   * merging regions in META and adds the merged region with the information of
+   * two merging regions.
+   * @param catalogTracker the catalog tracker
+   * @param mergedRegion the merged region
+   * @param regionA
+   * @param regionB
+   * @param sn the location of the region
+   * @throws IOException
+   */
+  public static void mergeRegions(final CatalogTracker catalogTracker,
+      HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
+      ServerName sn) throws IOException {
+    HTable meta = MetaReader.getMetaHTable(catalogTracker);
+    HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
+
+    // Put for parent
+    Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
+    putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
+        regionA.toByteArray());
+    putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
+        regionB.toByteArray());
+
+    // Deletes for merging regions
+    Delete deleteA = makeDeleteFromRegionInfo(regionA);
+    Delete deleteB = makeDeleteFromRegionInfo(regionB);
+
+    // The merged is a new region, openSeqNum = 1 is fine.
+    addLocation(putOfMerged, sn, 1);
+
+    byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
+        + HConstants.DELIMITER);
+    multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
+  }
+
+  /**
    * Splits the region into two in an atomic operation. Offlines the parent
    * region with the information that it is split into two, and also adds
    * the daughter regions. Does not add the location information to the daughter
@@ -291,17 +340,24 @@ public class MetaEditor {
     addLocation(putB, sn, 1);
 
     byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
-    multiPut(meta, tableRow, putParent, putA, putB);
+    multiMutate(meta, tableRow, putParent, putA, putB);
   }
 
   /**
-   * Performs an atomic multi-Put operation against the given table.
+   * Performs an atomic multi-Mutate operation against the given table.
    */
-  private static void multiPut(HTable table, byte[] row, Put... puts) throws IOException {
+  private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
     CoprocessorRpcChannel channel = table.coprocessorService(row);
     MultiMutateRequest.Builder mmrBuilder = MultiMutateRequest.newBuilder();
-    for (Put put : puts) {
-      mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, put));
+    for (Mutation mutation : mutations) {
+      if (mutation instanceof Put) {
+        mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, mutation));
+      } else if (mutation instanceof Delete) {
+        mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, mutation));
+      } else {
+        throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
+            + mutation.getClass().getName());
+      }
     }
 
     MultiRowMutationService.BlockingInterface service =
@@ -456,6 +512,24 @@ public class MetaEditor {
       ", from parent " + parent.getRegionNameAsString());
   }
 
+  /**
+   * Deletes merge qualifiers for the specified merged region.
+   * @param catalogTracker
+   * @param mergedRegion
+   * @throws IOException
+   */
+  public static void deleteMergeQualifiers(CatalogTracker catalogTracker,
+      final HRegionInfo mergedRegion) throws IOException {
+    Delete delete = new Delete(mergedRegion.getRegionName());
+    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
+    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
+    deleteFromMetaTable(catalogTracker, delete);
+    LOG.info("Deleted references in merged region "
+        + mergedRegion.getRegionNameAsString() + ", qualifier="
+        + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
+        + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
+  }
+
   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
   throws IOException {
     p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Sun Mar 24 10:26:21 2013
@@ -39,36 +39,36 @@ import java.util.concurrent.atomic.Atomi
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
+import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
+import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
+import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
+import org.apache.hadoop.hbase.master.handler.MergedRegionHandler;
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
 import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
-import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
-import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
@@ -85,6 +85,7 @@ import org.apache.zookeeper.KeeperExcept
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.data.Stat;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.LinkedHashMultimap;
 
 /**
@@ -623,6 +624,24 @@ public class AssignmentManager extends Z
           // multiple times so if it's still up we will receive an update soon.
         }
         break;
+      case RS_ZK_REGION_MERGING:
+        // nothing to do
+        LOG.info("Processed region " + regionInfo.getEncodedName()
+            + " in state : " + et + " nothing to do.");
+        break;
+      case RS_ZK_REGION_MERGE:
+        if (!serverManager.isServerOnline(sn)) {
+          // ServerShutdownHandler would handle this region
+          LOG.warn("Processed region " + regionInfo.getEncodedName()
+              + " in state : " + et + " on a dead regionserver: " + sn
+              + " doing nothing");
+        } else {
+          LOG.info("Processed region " + regionInfo.getEncodedName() + " in state : " +
+              et + " nothing to do.");
+          // We don't do anything. The regionserver is supposed to update the znode
+          // multiple times so if it's still up we will receive an update soon.
+        }
+        break;
       default:
         throw new IllegalStateException("Received region in state :" + et + " is not valid.");
     }
@@ -783,6 +802,34 @@ public class AssignmentManager extends Z
             regionState.getRegion(), sn, daughters));
           break;
 
+        case RS_ZK_REGION_MERGING:
+          // Merged region is a new region, we can't find it in the region states now.
+          // Do nothing.
+          break;
+
+        case RS_ZK_REGION_MERGE:
+          // Assert that we can get a serverinfo for this server.
+          if (!this.serverManager.isServerOnline(sn)) {
+            LOG.error("Dropped merge! ServerName=" + sn + " unknown.");
+            break;
+          }
+          // Get merged and merging regions.
+          byte[] payloadOfMerge = rt.getPayload();
+          List<HRegionInfo> mergeRegions;
+          try {
+            mergeRegions = HRegionInfo.parseDelimitedFrom(payloadOfMerge, 0,
+                payloadOfMerge.length);
+          } catch (IOException e) {
+            LOG.error("Dropped merge! Failed reading merge payload for " +
+              prettyPrintedRegionName);
+            break;
+          }
+          assert mergeRegions.size() == 3;
+          // Run handler to do the rest of the MERGE handling.
+          this.executorService.submit(new MergedRegionHandler(server, this, sn,
+              mergeRegions));
+          break;
+
         case M_ZK_REGION_CLOSING:
           // Should see CLOSING after we have asked it to CLOSE or additional
           // times after already being in state of CLOSING
@@ -2056,9 +2103,9 @@ public class AssignmentManager extends Z
             NodeExistsException nee = (NodeExistsException)e;
             String path = nee.getPath();
             try {
-              if (isSplitOrSplitting(path)) {
-                LOG.debug(path + " is SPLIT or SPLITTING; " +
-                  "skipping unassign because region no longer exists -- its split");
+              if (isSplitOrSplittingOrMergeOrMerging(path)) {
+                LOG.debug(path + " is SPLIT or SPLITTING or MERGE or MERGING; " +
+                  "skipping unassign because region no longer exists -- its split or merge");
                 return;
               }
             } catch (KeeperException.NoNodeException ke) {
@@ -2136,21 +2183,23 @@ public class AssignmentManager extends Z
 
   /**
    * @param path
-   * @return True if znode is in SPLIT or SPLITTING state.
+   * @return True if znode is in SPLIT or SPLITTING or MERGE or MERGING state.
    * @throws KeeperException Can happen if the znode went away in meantime.
    * @throws DeserializationException
    */
-  private boolean isSplitOrSplitting(final String path)
+  private boolean isSplitOrSplittingOrMergeOrMerging(final String path)
       throws KeeperException, DeserializationException {
     boolean result = false;
-    // This may fail if the SPLIT or SPLITTING znode gets cleaned up before we
-    // can get data from it.
+    // This may fail if the SPLIT or SPLITTING or MERGE or MERGING znode gets
+    // cleaned up before we can get data from it.
     byte [] data = ZKAssign.getData(watcher, path);
     if (data == null) return false;
     RegionTransition rt = RegionTransition.parseFrom(data);
     switch (rt.getEventType()) {
     case RS_ZK_REGION_SPLIT:
     case RS_ZK_REGION_SPLITTING:
+    case RS_ZK_REGION_MERGE:
+    case RS_ZK_REGION_MERGING:
       result = true;
       break;
     default:
@@ -2898,9 +2947,31 @@ public class AssignmentManager extends Z
   }
 
   /**
+   * Update inmemory structures.
+   * @param sn Server that reported the merge
+   * @param merged regioninfo of merged
+   * @param a region a
+   * @param b region b
+   */
+  public void handleRegionsMergeReport(final ServerName sn,
+      final HRegionInfo merged, final HRegionInfo a, final HRegionInfo b) {
+    regionOffline(a);
+    regionOffline(b);
+    regionOnline(merged, sn);
+
+    // There's a possibility that the region was merging while a user asked
+    // the master to disable, we need to make sure we close those regions in
+    // that case. This is not racing with the region server itself since RS
+    // report is done after the regions merge transaction completed.
+    if (this.zkTable.isDisablingOrDisabledTable(merged.getTableNameAsString())) {
+      unassign(merged);
+    }
+  }
+
+  /**
    * @param plan Plan to execute.
    */
-  void balance(final RegionPlan plan) {
+  public void balance(final RegionPlan plan) {
     synchronized (this.regionPlans) {
       this.regionPlans.put(plan.getRegionName(), plan);
     }

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Sun Mar 24 10:26:21 2013
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
@@ -45,13 +46,14 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.Triple;
 
 /**
  * A janitor for the catalog tables.  Scans the <code>.META.</code> catalog
  * table on a period looking for unused regions to garbage collect.
  */
 @InterfaceAudience.Private
-class CatalogJanitor extends Chore {
+public class CatalogJanitor extends Chore {
   private static final Log LOG = LogFactory.getLog(CatalogJanitor.class.getName());
   private final Server server;
   private final MasterServices services;
@@ -102,16 +104,37 @@ class CatalogJanitor extends Chore {
   }
 
   /**
-   * Scans META and returns a number of scanned rows, and
-   * an ordered map of split parents.
+   * Scans META and returns a number of scanned rows, and a map of merged
+   * regions, and an ordered map of split parents.
+   * @return triple of scanned rows, map of merged regions and map of split
+   *         parent regioninfos
+   * @throws IOException
    */
-  Pair<Integer, Map<HRegionInfo, Result>> getSplitParents() throws IOException {
+  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents()
+      throws IOException {
+    return getMergedRegionsAndSplitParents(null);
+  }
+
+  /**
+   * Scans META and returns a number of scanned rows, and a map of merged
+   * regions, and an ordered map of split parents. If the given table name is
+   * null, returns merged regions and split parents of all tables; otherwise
+   * only those of the specified table
+   * @param tableName null represents all tables
+   * @return triple of scanned rows, and map of merged regions, and map of split
+   *         parent regioninfos
+   * @throws IOException
+   */
+  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
+      final byte[] tableName) throws IOException {
+    final boolean isTableSpecified = (tableName != null && tableName.length != 0);
     // TODO: Only works with single .META. region currently.  Fix.
     final AtomicInteger count = new AtomicInteger(0);
     // Keep Map of found split parents.  There are candidates for cleanup.
     // Use a comparator that has split parents come before its daughters.
     final Map<HRegionInfo, Result> splitParents =
       new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
+    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
     // This visitor collects split parents and counts rows in the .META. table
     MetaReader.Visitor visitor = new MetaReader.Visitor() {
       @Override
@@ -120,20 +143,72 @@ class CatalogJanitor extends Chore {
         count.incrementAndGet();
         HRegionInfo info = HRegionInfo.getHRegionInfo(r);
         if (info == null) return true; // Keep scanning
+        if (isTableSpecified
+            && Bytes.compareTo(info.getTableName(), tableName) > 0) {
+          // Another table, stop scanning
+          return false;
+        }
         if (info.isSplitParent()) splitParents.put(info, r);
+        if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
+          mergedRegions.put(info, r);
+        }
         // Returning true means "keep scanning"
         return true;
       }
     };
-    // Run full scan of .META. catalog table passing in our custom visitor
-    MetaReader.fullScan(this.server.getCatalogTracker(), visitor);
 
-    return new Pair<Integer, Map<HRegionInfo, Result>>(count.get(), splitParents);
+    byte[] startRow = (!isTableSpecified) ? HConstants.EMPTY_START_ROW
+        : HRegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW,
+            HConstants.ZEROES, false);
+    // Run full scan of .META. catalog table passing in our custom visitor with
+    // the start row
+    MetaReader.fullScan(this.server.getCatalogTracker(), visitor, startRow);
+
+    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
+        count.get(), mergedRegions, splitParents);
+  }
+
+  /**
+   * If merged region no longer holds reference to the merge regions, archive
+   * merge region on hdfs and perform deleting references in .META.
+   * @param mergedRegion
+   * @param regionA
+   * @param regionB
+   * @return true if we delete references in merged region on .META. and archive
+   *         the files on the file system
+   * @throws IOException
+   */
+  boolean cleanMergeRegion(final HRegionInfo mergedRegion,
+      final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
+    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
+    Path rootdir = this.services.getMasterFileSystem().getRootDir();
+    Path tabledir = HTableDescriptor.getTableDir(rootdir,
+        mergedRegion.getTableName());
+    HTableDescriptor htd = getTableDescriptor(mergedRegion
+        .getTableNameAsString());
+    HRegionFileSystem regionFs = null;
+    try {
+      regionFs = HRegionFileSystem.openRegionFromFileSystem(
+          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
+    } catch (IOException e) {
+      LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
+    }
+    if (regionFs == null || !regionFs.hasReferences(htd)) {
+      LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
+          + regionB.getRegionNameAsString()
+          + " from fs because merged region no longer holds references");
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
+      MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
+      return true;
+    }
+    return false;
   }
 
   /**
    * Run janitorial scan of catalog <code>.META.</code> table looking for
    * garbage to collect.
+   * @return number of cleaned regions
    * @throws IOException
    */
   int scan() throws IOException {
@@ -141,18 +216,44 @@ class CatalogJanitor extends Chore {
       if (!alreadyRunning.compareAndSet(false, true)) {
         return 0;
       }
-      Pair<Integer, Map<HRegionInfo, Result>> pair = getSplitParents();
-      int count = pair.getFirst();
-      Map<HRegionInfo, Result> splitParents = pair.getSecond();
+      Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> scanTriple =
+        getMergedRegionsAndSplitParents();
+      int count = scanTriple.getFirst();
+      /**
+       * clean merge regions first
+       */
+      int mergeCleaned = 0;
+      Map<HRegionInfo, Result> mergedRegions = scanTriple.getSecond();
+      for (Map.Entry<HRegionInfo, Result> e : mergedRegions.entrySet()) {
+        HRegionInfo regionA = HRegionInfo.getHRegionInfo(e.getValue(),
+            HConstants.MERGEA_QUALIFIER);
+        HRegionInfo regionB = HRegionInfo.getHRegionInfo(e.getValue(),
+            HConstants.MERGEB_QUALIFIER);
+        if (regionA == null || regionB == null) {
+          LOG.warn("Unexpected references regionA="
+              + (regionA == null ? "null" : regionA.getRegionNameAsString())
+              + ",regionB="
+              + (regionB == null ? "null" : regionB.getRegionNameAsString())
+              + " in merged region " + e.getKey().getRegionNameAsString());
+        } else {
+          if (cleanMergeRegion(e.getKey(), regionA, regionB)) {
+            mergeCleaned++;
+          }
+        }
+      }
+      /**
+       * clean split parents
+       */
+      Map<HRegionInfo, Result> splitParents = scanTriple.getThird();
 
       // Now work on our list of found parents. See if any we can clean up.
-      int cleaned = 0;
-    //regions whose parents are still around
+      int splitCleaned = 0;
+      // regions whose parents are still around
       HashSet<String> parentNotCleaned = new HashSet<String>();
       for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
         if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
             cleanParent(e.getKey(), e.getValue())) {
-          cleaned++;
+          splitCleaned++;
         } else {
           // We could not clean the parent, so it's daughters should not be cleaned either (HBASE-6160)
           PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(e.getValue());
@@ -160,14 +261,16 @@ class CatalogJanitor extends Chore {
           parentNotCleaned.add(daughters.getSecond().getEncodedName());
         }
       }
-      if (cleaned != 0) {
-        LOG.info("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
-            " unreferenced parent region(s)");
+      if ((mergeCleaned + splitCleaned) != 0) {
+        LOG.info("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
+            + " unreferenced merged region(s) and " + splitCleaned
+            + " unreferenced parent region(s)");
       } else if (LOG.isDebugEnabled()) {
-        LOG.debug("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
-            " unreferenced parent region(s)");
+        LOG.debug("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
+            + " unreferenced merged region(s) and " + splitCleaned
+            + " unreferenced parent region(s)");
       }
-      return cleaned;
+      return mergeCleaned + splitCleaned;
     } finally {
       alreadyRunning.set(false);
     }
@@ -220,6 +323,14 @@ class CatalogJanitor extends Chore {
   boolean cleanParent(final HRegionInfo parent, Result rowContent)
   throws IOException {
     boolean result = false;
+    // Check whether it is a merged region whose merge references have not
+    // yet been cleaned. It is not necessary to check MERGEB_QUALIFIER because
+    // the two qualifiers are always inserted/deleted together
+    if (rowContent.getValue(HConstants.CATALOG_FAMILY,
+        HConstants.MERGEA_QUALIFIER) != null) {
+      // wait for the merged region to be cleaned first
+      return result;
+    }
     // Run checks on each daughter split.
     PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
     Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
@@ -309,4 +420,33 @@ class CatalogJanitor extends Chore {
       throws FileNotFoundException, IOException {
     return this.services.getTableDescriptors().get(tableName);
   }
+
+  /**
+   * Checks if the specified region has merge qualifiers, if so, try to clean
+   * them
+   * @param region
+   * @return true if the specified region doesn't have merge qualifier now
+   * @throws IOException
+   */
+  public boolean cleanMergeQualifier(final HRegionInfo region)
+      throws IOException {
+    // Get merge regions if it is a merged region and already has merge
+    // qualifier
+    Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaReader
+        .getRegionsFromMergeQualifier(this.services.getCatalogTracker(),
+            region.getRegionName());
+    if (mergeRegions == null
+        || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
+      // It doesn't have merge qualifier, no need to clean
+      return true;
+    }
+    // It shouldn't happen, we must insert/delete these two qualifiers together
+    if (mergeRegions.getFirst() == null || mergeRegions.getSecond() == null) {
+      LOG.error("Merged region " + region.getRegionNameAsString()
+          + " has only one merge qualifier in META.");
+      return false;
+    }
+    return cleanMergeRegion(region, mergeRegions.getFirst(),
+        mergeRegions.getSecond());
+  }
 }

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Sun Mar 24 10:26:21 2013
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.Abortable
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -56,17 +55,11 @@ import org.apache.hadoop.hbase.HTableDes
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.MasterAdminProtocol;
 import org.apache.hadoop.hbase.MasterMonitorProtocol;
-import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
-import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
 import org.apache.hadoop.hbase.RegionServerStatusProtocol;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.exceptions.TableNotDisabledException;
-import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
-import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -75,13 +68,20 @@ import org.apache.hadoop.hbase.client.Me
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
+import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
+import org.apache.hadoop.hbase.exceptions.TableNotDisabledException;
+import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
+import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
+import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.ipc.HBaseServerRPC;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
-import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.master.balancer.BalancerChore;
 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
@@ -90,6 +90,7 @@ import org.apache.hadoop.hbase.master.cl
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
+import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
 import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
@@ -125,6 +126,8 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
@@ -1396,6 +1399,57 @@ Server {
   }
 
   @Override
+  public DispatchMergingRegionsResponse dispatchMergingRegions(
+      RpcController controller, DispatchMergingRegionsRequest request)
+      throws ServiceException {
+    final byte[] encodedNameOfRegionA = request.getRegionA().getValue()
+        .toByteArray();
+    final byte[] encodedNameOfRegionB = request.getRegionB().getValue()
+        .toByteArray();
+    final boolean forcible = request.getForcible();
+    if (request.getRegionA().getType() != RegionSpecifierType.ENCODED_REGION_NAME
+        || request.getRegionB().getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
+      LOG.warn("mergeRegions specifier type: expected: "
+          + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region_a="
+          + request.getRegionA().getType() + ", region_b="
+          + request.getRegionB().getType());
+    }
+    RegionState regionStateA = assignmentManager.getRegionStates()
+        .getRegionState(Bytes.toString(encodedNameOfRegionA));
+    RegionState regionStateB = assignmentManager.getRegionStates()
+        .getRegionState(Bytes.toString(encodedNameOfRegionB));
+    if (regionStateA == null || regionStateB == null) {
+      throw new ServiceException(new UnknownRegionException(
+          Bytes.toStringBinary(regionStateA == null ? encodedNameOfRegionA
+              : encodedNameOfRegionB)));
+    }
+
+    if (!forcible && !HRegionInfo.areAdjacent(regionStateA.getRegion(),
+            regionStateB.getRegion())) {
+      throw new ServiceException("Unable to merge not adjacent regions "
+          + regionStateA.getRegion().getRegionNameAsString() + ", "
+          + regionStateB.getRegion().getRegionNameAsString()
+          + " where forcible = " + forcible);
+    }
+
+    try {
+      dispatchMergingRegions(regionStateA.getRegion(), regionStateB.getRegion(), forcible);
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+
+    return DispatchMergingRegionsResponse.newBuilder().build();
+  }
+
+  @Override
+  public void dispatchMergingRegions(final HRegionInfo region_a,
+      final HRegionInfo region_b, final boolean forcible) throws IOException {
+    checkInitialized();
+    this.executorService.submit(new DispatchMergingRegionHandler(this,
+        this.catalogJanitorChore, region_a, region_b, forcible));
+  }
+
+  @Override
   public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
   throws ServiceException {
     final byte [] encodedRegionName = req.getRegion().getValue().toByteArray();

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Sun Mar 24 10:26:21 2013
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -171,4 +172,16 @@ public interface MasterServices extends 
    */
   public boolean registerService(Service instance);
 
+  /**
+   * Merge two regions. The real implementation is on the regionserver; the
+   * master just moves the regions together and sends a MERGE RPC to the regionserver
+   * @param region_a region to merge
+   * @param region_b region to merge
+   * @param forcible true if do a compulsory merge, otherwise we will only merge
+   *          two adjacent regions
+   * @throws IOException
+   */
+  public void dispatchMergingRegions(final HRegionInfo region_a,
+      final HRegionInfo region_b, final boolean forcible) throws IOException;
+
 }

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Sun Mar 24 10:26:21 2013
@@ -669,6 +669,36 @@ public class ServerManager {
     return sendRegionClose(server, region, versionOfClosingNode, null, true);
   }
 
+  /**
+   * Sends a MERGE REGIONS RPC to the specified server to merge the specified
+   * regions.
+   * <p>
+   * A region server could reject the merge request because it does not
+   * have the specified region.
+   * @param server server to merge regions
+   * @param region_a region to merge
+   * @param region_b region to merge
+   * @param forcible true if do a compulsory merge, otherwise we will only merge
+   *          two adjacent regions
+   * @throws IOException
+   */
+  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
+      HRegionInfo region_b, boolean forcible) throws IOException {
+    if (server == null)
+      throw new NullPointerException("Passed server is null");
+    if (region_a == null || region_b == null)
+      throw new NullPointerException("Passed region is null");
+    AdminProtocol admin = getServerConnection(server);
+    if (admin == null) {
+      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
+          + server.toString() + " for region "
+          + region_a.getRegionNameAsString() + ","
+          + region_b.getRegionNameAsString()
+          + " failed because no RPC connection found to this server");
+    }
+    ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible);
+  }
+
     /**
     * @param sn
     * @return

Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java?rev=1460306&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java Sun Mar 24 10:26:21 2013
@@ -0,0 +1,164 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.CatalogJanitor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Handles a MERGE regions request on the master: moves the regions together (onto
+ * the same regionserver) and sends a MERGE RPC to the regionserver.
+ *
+ * NOTE:The real merge is executed on the regionserver
+ *
+ */
+@InterfaceAudience.Private
+public class DispatchMergingRegionHandler extends EventHandler {
+  private static final Log LOG = LogFactory.getLog(DispatchMergingRegionHandler.class);
+  private final MasterServices masterServices;
+  private final CatalogJanitor catalogJanitor;
+  private HRegionInfo region_a;
+  private HRegionInfo region_b;
+  private final boolean forcible;
+  private final int timeout;
+
+  public DispatchMergingRegionHandler(final MasterServices services,
+      final CatalogJanitor catalogJanitor, final HRegionInfo region_a,
+      final HRegionInfo region_b, final boolean forcible) {
+    super(services, EventType.C_M_MERGE_REGION);
+    this.masterServices = services;
+    this.catalogJanitor = catalogJanitor;
+    this.region_a = region_a;
+    this.region_b = region_b;
+    this.forcible = forcible;
+    this.timeout = server.getConfiguration().getInt(
+        "hbase.master.regionmerge.timeout", 30 * 1000);
+  }
+
+  @Override
+  public void process() throws IOException {
+    boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(region_a);
+    if (regionAHasMergeQualifier
+        || !catalogJanitor.cleanMergeQualifier(region_b)) {
+      LOG.info("Skip merging regions " + region_a.getRegionNameAsString()
+          + ", " + region_b.getRegionNameAsString() + ", because region "
+          + (regionAHasMergeQualifier ? region_a.getEncodedName() : region_b
+              .getEncodedName()) + " has merge qualifier");
+      return;
+    }
+
+    RegionStates regionStates = masterServices.getAssignmentManager()
+        .getRegionStates();
+    ServerName region_a_location = regionStates.getRegionServerOfRegion(region_a);
+    ServerName region_b_location = regionStates.getRegionServerOfRegion(region_b);
+    if (region_a_location == null || region_b_location == null) {
+      LOG.info("Skip merging regions " + region_a.getRegionNameAsString()
+          + ", " + region_b.getRegionNameAsString() + ", because region "
+          + (region_a_location == null ? region_a.getEncodedName() : region_b
+              .getEncodedName()) + " is not online now");
+      return;
+    }
+    long startTime = EnvironmentEdgeManager.currentTimeMillis();
+    boolean onSameRS = region_a_location.equals(region_b_location);
+
+    // Make sure regions are on the same regionserver before send merge
+    // regions request to regionserver
+    if (!onSameRS) {
+      // Move region_b to region a's location, switch region_a and region_b if
+      // region_a's load lower than region_b's, so we will always move lower
+      // load region
+      RegionLoad loadOfRegionA = masterServices.getServerManager()
+          .getLoad(region_a_location).getRegionsLoad()
+          .get(region_a.getRegionName());
+      RegionLoad loadOfRegionB = masterServices.getServerManager()
+          .getLoad(region_b_location).getRegionsLoad()
+          .get(region_b.getRegionName());
+      if (loadOfRegionA != null && loadOfRegionB != null
+          && loadOfRegionA.getRequestsCount() < loadOfRegionB
+              .getRequestsCount()) {
+        // switch region_a and region_b
+        HRegionInfo tmpRegion = this.region_a;
+        this.region_a = this.region_b;
+        this.region_b = tmpRegion;
+        ServerName tmpLocation = region_a_location;
+        region_a_location = region_b_location;
+        region_b_location = tmpLocation;
+      }
+
+      RegionPlan regionPlan = new RegionPlan(region_b, region_b_location,
+          region_a_location);
+      masterServices.getAssignmentManager().balance(regionPlan);
+      while (!masterServices.isStopped()) {
+        try {
+          Thread.sleep(20);
+          region_b_location = masterServices.getAssignmentManager()
+              .getRegionStates().getRegionServerOfRegion(region_b);
+          onSameRS = region_a_location.equals(region_b_location);
+          if (onSameRS || !regionStates.isRegionInTransition(region_b)) {
+            // Regions are on the same RS, or region_b is not in
+            // RegionInTransition any more
+            break;
+          }
+          if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) break;
+        } catch (InterruptedException e) {
+          InterruptedIOException iioe = new InterruptedIOException();
+          iioe.initCause(e);
+          throw iioe;
+        }
+      }
+    }
+
+    if (onSameRS) {
+      try{
+        masterServices.getServerManager().sendRegionsMerge(region_a_location,
+            region_a, region_b, forcible);
+        LOG.info("Successfully send MERGE REGIONS RPC to server "
+            + region_a_location.toString() + " for region "
+            + region_a.getRegionNameAsString() + ","
+            + region_b.getRegionNameAsString() + ", focible=" + forcible);
+      } catch (IOException ie) {
+        LOG.info("Failed send MERGE REGIONS RPC to server "
+            + region_a_location.toString() + " for region "
+            + region_a.getRegionNameAsString() + ","
+            + region_b.getRegionNameAsString() + ", focible=" + forcible + ", "
+            + ie.getMessage());
+      }
+    } else {
+      LOG.info("Cancel merging regions " + region_a.getRegionNameAsString()
+          + ", " + region_b.getRegionNameAsString()
+          + ", because can't move them together after "
+          + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms");
+    }
+  }
+
+}

Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java?rev=1460306&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java Sun Mar 24 10:26:21 2013
@@ -0,0 +1,117 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.zookeeper.ZKAssign;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+/**
+ * Handles the MERGE regions event on the master: the master receives the merge
+ * report from the regionserver, then offlines the merging regions and onlines
+ * the merged region. Here region_a sorts before region_b.
+ */
+@InterfaceAudience.Private
+public class MergedRegionHandler extends EventHandler implements
+    TotesHRegionInfo {
+  private static final Log LOG = LogFactory.getLog(MergedRegionHandler.class);
+  private final AssignmentManager assignmentManager;
+  private final HRegionInfo merged;
+  private final HRegionInfo region_a;
+  private final HRegionInfo region_b;
+  private final ServerName sn;
+
+  public MergedRegionHandler(Server server,
+      AssignmentManager assignmentManager, ServerName sn,
+      final List<HRegionInfo> mergeRegions) {
+    super(server, EventType.RS_ZK_REGION_MERGE);
+    assert mergeRegions.size() == 3;
+    this.assignmentManager = assignmentManager;
+    this.merged = mergeRegions.get(0);
+    this.region_a = mergeRegions.get(1);
+    this.region_b = mergeRegions.get(2);
+    this.sn = sn;
+  }
+
+  @Override
+  public HRegionInfo getHRegionInfo() {
+    return this.merged;
+  }
+
+  @Override
+  public String toString() {
+    String name = "UnknownServerName";
+    if (server != null && server.getServerName() != null) {
+      name = server.getServerName().toString();
+    }
+    String mergedRegion = "UnknownRegion";
+    if (merged != null) {
+      mergedRegion = merged.getRegionNameAsString();
+    }
+    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-"
+        + mergedRegion;
+  }
+
+  @Override
+  public void process() {
+    String encodedRegionName = this.merged.getEncodedName();
+    LOG.debug("Handling MERGE event for " + encodedRegionName
+        + "; deleting node");
+
+    this.assignmentManager.handleRegionsMergeReport(this.sn, this.merged,
+        this.region_a, this.region_b);
+    // Remove region from ZK
+    try {
+
+      boolean successful = false;
+      while (!successful) {
+        // It's possible that the RS tickles in between the reading of the
+        // znode and the deleting, so it's safe to retry.
+        successful = ZKAssign.deleteNode(this.server.getZooKeeper(),
+            encodedRegionName, EventType.RS_ZK_REGION_MERGE);
+      }
+    } catch (KeeperException e) {
+      if (e instanceof NoNodeException) {
+        String znodePath = ZKUtil.joinZNode(
+            this.server.getZooKeeper().splitLogZNode, encodedRegionName);
+        LOG.debug("The znode " + znodePath
+            + " does not exist.  May be deleted already.");
+      } else {
+        server.abort("Error deleting MERGE node in ZK for transition ZK node ("
+            + merged.getEncodedName() + ")", e);
+      }
+    }
+    LOG.info("Handled MERGE event; merged="
+        + this.merged.getRegionNameAsString() + " region_a="
+        + this.region_a.getRegionNameAsString() + "region_b="
+        + this.region_b.getRegionNameAsString());
+  }
+}

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Sun Mar 24 10:26:21 2013
@@ -57,6 +57,7 @@ public class CompactSplitThread implemen
   private final ThreadPoolExecutor largeCompactions;
   private final ThreadPoolExecutor smallCompactions;
   private final ThreadPoolExecutor splits;
+  private final ThreadPoolExecutor mergePool;
 
   /**
    * Splitting should not take place if the total number of regions exceed this.
@@ -118,6 +119,16 @@ public class CompactSplitThread implemen
             return t;
           }
       });
+    int mergeThreads = conf.getInt("hbase.regionserver.thread.merge", 1);
+    this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
+        mergeThreads, new ThreadFactory() {
+          @Override
+          public Thread newThread(Runnable r) {
+            Thread t = new Thread(r);
+            t.setName(n + "-merges-" + System.currentTimeMillis());
+            return t;
+          }
+        });
   }
 
   @Override
@@ -125,7 +136,8 @@ public class CompactSplitThread implemen
     return "compaction_queue=("
         + largeCompactions.getQueue().size() + ":"
         + smallCompactions.getQueue().size() + ")"
-        + ", split_queue=" + splits.getQueue().size();
+        + ", split_queue=" + splits.getQueue().size()
+        + ", merge_queue=" + mergePool.getQueue().size();
   }
   
   public String dumpQueue() {
@@ -159,9 +171,32 @@ public class CompactSplitThread implemen
       queueLists.append("\n");
     }
     
+    queueLists.append("\n");
+    queueLists.append("  Region Merge Queue:\n");
+    lq = mergePool.getQueue();
+    it = lq.iterator();
+    while (it.hasNext()) {
+      queueLists.append("    " + it.next().toString());
+      queueLists.append("\n");
+    }
+
     return queueLists.toString();
   }
 
+  public synchronized void requestRegionsMerge(final HRegion a,
+      final HRegion b, final boolean forcible) {
+    try {
+      mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Region merge requested for " + a + "," + b + ", forcible="
+            + forcible + ".  " + this);
+      }
+    } catch (RejectedExecutionException ree) {
+      LOG.warn("Could not execute merge for " + a + "," + b + ", forcible="
+          + forcible, ree);
+    }
+  }
+
   public synchronized boolean requestSplit(final HRegion r) {
     // don't split regions that are blocking
     if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) {
@@ -270,6 +305,7 @@ public class CompactSplitThread implemen
    */
   void interruptIfNecessary() {
     splits.shutdown();
+    mergePool.shutdown();
     largeCompactions.shutdown();
     smallCompactions.shutdown();
   }
@@ -291,6 +327,7 @@ public class CompactSplitThread implemen
 
   void join() {
     waitFor(splits, "Split Thread");
+    waitFor(mergePool, "Merge Thread");
     waitFor(largeCompactions, "Large Compaction Thread");
     waitFor(smallCompactions, "Small Compaction Thread");
   }

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sun Mar 24 10:26:21 2013
@@ -66,6 +66,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -77,6 +78,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -783,6 +785,24 @@ public class HRegion implements HeapSize
     return isAvailable() && !hasReferences();
   }
 
+  /**
+   * @return true if region is mergeable
+   */
+  public boolean isMergeable() {
+    if (!isAvailable()) {
+      LOG.debug("Region " + this.getRegionNameAsString()
+          + " is not mergeable because it is closing or closed");
+      return false;
+    }
+    if (hasReferences()) {
+      LOG.debug("Region " + this.getRegionNameAsString()
+          + " is not mergeable because it has references");
+      return false;
+    }
+
+    return true;
+  }
+
   public boolean areWritesEnabled() {
     synchronized(this.writestate) {
       return this.writestate.writesEnabled;
@@ -4061,6 +4081,26 @@ public class HRegion implements HeapSize
   }
 
   /**
+   * Create a merged region given a temp directory with the region data.
+   * @param mergedRegionInfo
+   * @param region_b another merging region
+   * @return merged hregion
+   * @throws IOException
+   */
+  HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo,
+      final HRegion region_b) throws IOException {
+    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getLog(),
+        fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo,
+        this.getTableDesc(), this.rsServices);
+    r.readRequestsCount.set(this.getReadRequestsCount()
+        + region_b.getReadRequestsCount());
+    r.writeRequestsCount.set(this.getWriteRequestsCount()
+        + region_b.getWriteRequestsCount());
+    this.fs.commitMergedRegion(mergedRegionInfo);
+    return r;
+  }
+
+  /**
    * Inserts a new region's meta information into the passed
    * <code>meta</code> region. Used by the HMaster bootstrap code adding
    * new table to META table.

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java Sun Mar 24 10:26:21 2013
@@ -40,8 +40,10 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.Reference;
@@ -197,6 +199,21 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Check whether region has Reference file
+   * @param htd table descriptor of the region
+   * @return true if region has reference file
+   * @throws IOException
+   */
+  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
+    for (HColumnDescriptor family : htd.getFamilies()) {
+      if (hasReferences(family.getNameAsString())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
    * @return the set of families present on disk
    * @throws IOException
    */
@@ -509,6 +526,10 @@ public class HRegionFileSystem {
     return new Path(getRegionDir(), REGION_MERGES_DIR);
   }
 
+  Path getMergesDir(final HRegionInfo hri) {
+    return new Path(getMergesDir(), hri.getEncodedName());
+  }
+
   /**
    * Clean up any merge detritus that may have been left around from previous merge attempts.
    */
@@ -516,6 +537,84 @@ public class HRegionFileSystem {
     FSUtils.deleteDirectory(fs, getMergesDir());
   }
 
+  /**
+   * Remove merged region
+   * @param mergedRegion {@link HRegionInfo}
+   * @throws IOException
+   */
+  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
+    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
+    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
+      throw new IOException("Failed delete of " + regionDir);
+    }
+  }
+
+  /**
+   * Create the region merges directory.
+   * @throws IOException If merges dir already exists or we fail to create it.
+   * @see HRegionFileSystem#cleanupMergesDir()
+   */
+  void createMergesDir() throws IOException {
+    Path mergesdir = getMergesDir();
+    if (fs.exists(mergesdir)) {
+      LOG.info("The " + mergesdir
+          + " directory exists.  Hence deleting it to recreate it");
+      if (!fs.delete(mergesdir, true)) {
+        throw new IOException("Failed deletion of " + mergesdir
+            + " before creating them again.");
+      }
+    }
+    if (!fs.mkdirs(mergesdir))
+      throw new IOException("Failed create of " + mergesdir);
+  }
+
+  /**
+   * Write out a merge reference under the given merges directory. Package local
+   * so it doesn't leak out of regionserver.
+   * @param mergedRegion {@link HRegionInfo} of the merged region
+   * @param familyName Column Family Name
+   * @param f File to create reference.
+   * @param mergedDir merges directory where the reference file is created
+   * @return Path to created reference.
+   * @throws IOException
+   */
+  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
+      final StoreFile f, final Path mergedDir)
+      throws IOException {
+    Path referenceDir = new Path(new Path(mergedDir,
+        mergedRegion.getEncodedName()), familyName);
+    // A whole reference to the store file.
+    Reference r = Reference.createTopReference(regionInfo.getStartKey());
+    // Add the referred-to regions name as a dot separated suffix.
+    // See REF_NAME_REGEX regex above. The referred-to regions name is
+    // up in the path of the passed in <code>f</code> -- parentdir is family,
+    // then the directory above is the region name.
+    String mergingRegionName = regionInfo.getEncodedName();
+    // Write reference with same file id only with the other region name as
+    // suffix and into the new region location (under same family).
+    Path p = new Path(referenceDir, f.getPath().getName() + "."
+        + mergingRegionName);
+    return r.write(fs, p);
+  }
+
+  /**
+   * Commit a merged region, moving it from the merges temporary directory to
+   * the proper location in the filesystem.
+   * @param mergedRegionInfo merged region {@link HRegionInfo}
+   * @throws IOException 
+   */
+  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
+    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
+    // Move the tmp dir in the expected location
+    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
+      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
+        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
+            + regionDir);
+      }
+    }
+  }
+
   // ===========================================================================
   //  Create/Open/Delete Helpers
   // ===========================================================================

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1460306&r1=1460305&r2=1460306&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Sun Mar 24 10:26:21 2013
@@ -55,35 +55,21 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
-import org.apache.hadoop.hbase.exceptions.LeaseException;
-import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
-import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
-import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
-import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.RegionServerStatusProtocol;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.exceptions.RegionServerRunningException;
-import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
-import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
-import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
@@ -101,6 +87,21 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
+import org.apache.hadoop.hbase.exceptions.LeaseException;
+import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
+import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
+import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
+import org.apache.hadoop.hbase.exceptions.RegionMovedException;
+import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
+import org.apache.hadoop.hbase.exceptions.RegionServerRunningException;
+import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
+import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
+import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
@@ -114,7 +115,6 @@ import org.apache.hadoop.hbase.ipc.Paylo
 import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine;
 import org.apache.hadoop.hbase.ipc.RpcClientEngine;
 import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -135,6 +135,8 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
@@ -203,11 +205,11 @@ import org.apache.hadoop.hbase.util.Thre
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.net.DNS;
@@ -3503,6 +3505,35 @@ public class HRegionServer implements Cl
   }
 
   /**
+   * Merge regions on the region server.
+   *
+   * @param controller the RPC controller
+   * @param request the request
+   * @return merge regions response
+   * @throws ServiceException
+   */
+  @Override
+  @QosPriority(priority = HConstants.HIGH_QOS)
+  public MergeRegionsResponse mergeRegions(final RpcController controller,
+      final MergeRegionsRequest request) throws ServiceException {
+    try {
+      checkOpen();
+      requestCount.increment();
+      HRegion regionA = getRegion(request.getRegionA());
+      HRegion regionB = getRegion(request.getRegionB());
+      boolean forcible = request.getForcible();
+      LOG.info("Receiving merging request for  " + regionA + ", " + regionB
+          + ",forcible=" + forcible);
+      regionA.flushcache();
+      regionB.flushcache();
+      compactSplitThread.requestRegionsMerge(regionA, regionB, forcible);
+      return MergeRegionsResponse.newBuilder().build();
+    } catch (IOException ie) {
+      throw new ServiceException(ie);
+    }
+  }
+
+  /**
    * Compact a region on the region server.
    *
    * @param controller the RPC controller

Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java?rev=1460306&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java Sun Mar 24 10:26:21 2013
@@ -0,0 +1,112 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Handles processing region merges. Put in a queue, owned by HRegionServer.
+ */
+@InterfaceAudience.Private
+class RegionMergeRequest implements Runnable {
+  static final Log LOG = LogFactory.getLog(RegionMergeRequest.class);
+  private final HRegion region_a;
+  private final HRegion region_b;
+  private final HRegionServer server;
+  private final boolean forcible;
+
+  RegionMergeRequest(HRegion a, HRegion b, HRegionServer hrs, boolean forcible) {
+    Preconditions.checkNotNull(hrs);
+    this.region_a = a;
+    this.region_b = b;
+    this.server = hrs;
+    this.forcible = forcible;
+  }
+
+  @Override
+  public String toString() {
+    return "MergeRequest,regions:" + region_a + ", " + region_b + ", forcible="
+        + forcible;
+  }
+
+  @Override
+  public void run() {
+    if (this.server.isStopping() || this.server.isStopped()) {
+      LOG.debug("Skipping merge because server is stopping="
+          + this.server.isStopping() + " or stopped=" + this.server.isStopped());
+      return;
+    }
+    try {
+      final long startTime = EnvironmentEdgeManager.currentTimeMillis();
+      RegionMergeTransaction mt = new RegionMergeTransaction(region_a,
+          region_b, forcible);
+      // If prepare does not return true, for some reason -- logged inside
+      // the prepare call -- we are not ready to merge just now. Just return.
+      if (!mt.prepare(this.server)) return;
+      try {
+        mt.execute(this.server, this.server);
+      } catch (Exception e) {
+        if (this.server.isStopping() || this.server.isStopped()) {
+          LOG.info(
+              "Skip rollback/cleanup of failed merge of " + region_a + " and "
+                  + region_b + " because server is"
+                  + (this.server.isStopping() ? " stopping" : " stopped"), e);
+          return;
+        }
+        try {
+          LOG.warn("Running rollback/cleanup of failed merge of "
+                  + region_a +" and "+ region_b + "; " + e.getMessage(), e);
+          if (mt.rollback(this.server, this.server)) {
+            LOG.info("Successful rollback of failed merge of "
+                + region_a +" and "+ region_b);
+          } else {
+            this.server.abort("Abort; we got an error after point-of-no-return"
+                + "when merging " + region_a + " and " + region_b);
+          }
+        } catch (RuntimeException ee) {
+          String msg = "Failed rollback of failed merge of "
+              + region_a +" and "+ region_b + " -- aborting server";
+          // If failed rollback, kill this server to avoid having a hole in
+          // table.
+          LOG.info(msg, ee);
+          this.server.abort(msg);
+        }
+        return;
+      }
+      LOG.info("Regions merged, META updated, and report to master. region_a="
+          + region_a + ", region_b=" + region_b + ",merged region="
+          + mt.getMergedRegionInfo().getRegionNameAsString()
+          + ". Region merge took "
+          + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime));
+    } catch (IOException ex) {
+      LOG.error("Merge failed " + this,
+          RemoteExceptionHandler.checkIOException(ex));
+      server.checkFileSystem();
+    }
+  }
+}



Mime
View raw message