hbase-commits mailing list archives

From ecl...@apache.org
Subject [3/3] hbase git commit: HBASE-12510 Make hbase-consensus independent of HRegionInfo (and other cruft removal)
Date Sat, 06 Dec 2014 20:55:30 GMT
HBASE-12510 Make hbase-consensus independent of HRegionInfo (and other cruft removal)

Summary:
We need to remove HRegionInfo from hbase-consensus (and in general all HBase dependencies, but they aren't blocking us right now). This is because:
(1) We cannot use our internal HRegionInfo while trying to have a quorum on the WAL.
(2) The open-source HRegionInfo is different from ours (no QuorumInfo, no HTableDescriptor, etc.).
(3) We would depend on hbase-client while hbase-client depends on us (hbase-consensus); that is a cyclic dependency and will fail to compile.
(4) Regardless of (1)-(3), using HRegionInfo in hbase-consensus doesn't make sense, because the quorum should be independent of the underlying KV store (see the sketch below).
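
To make (4) concrete, here is a minimal sketch of what the decoupling looks like from the consensus layer's side: a QuorumInfo is built directly from peer addresses and a quorum name, with no HRegionInfo involved. The QuorumInfo (peers, name) constructor, LOCAL_DC_KEY and getPeersWithRank() are taken from this patch; the host names, ports and ranks are invented placeholders.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;

public class QuorumInfoSketch {
  public static void main(String[] args) {
    // Assign a rank to each peer in the (single, local) datacenter.
    Map<HServerAddress, Integer> rankByPeer = new HashMap<>();
    rankByPeer.put(new HServerAddress("peer1.example.com:60020"), 3);
    rankByPeer.put(new HServerAddress("peer2.example.com:60020"), 2);
    rankByPeer.put(new HServerAddress("peer3.example.com:60020"), 1);

    // Peers are grouped per datacenter; LOCAL_DC_KEY covers non-HydraBase mode.
    Map<String, Map<HServerAddress, Integer>> peers = new HashMap<>();
    peers.put(QuorumInfo.LOCAL_DC_KEY, rankByPeer);

    // The quorum is identified by a plain string, not by an HRegionInfo.
    QuorumInfo quorumInfo = new QuorumInfo(peers, "exampleQuorum");
    System.out.println("Peers with rank: " + quorumInfo.getPeersWithRank());
  }
}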

Apart from this, I moved the RMap-related parts to hbase-server; a couple of TODOs still need to be finished there. I also started using ServerName instead of HServerAddress wherever possible.
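
The mechanical half of the HServerAddress-to-ServerName migration looks roughly like the hedged sketch below. This is an illustration rather than code from the patch: ServerName.valueOf(hostname, port, startcode) is the standard hbase-client API, the -1 startcode is an assumption (a bare address carries no startcode), and the helper name and host are invented for the example.

import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.ServerName;

public class ServerNameMigrationSketch {
  // Convert a legacy HServerAddress into a ServerName; -1 stands in for the
  // unknown startcode, since an address alone does not carry one.
  static ServerName toServerName(HServerAddress address) {
    return ServerName.valueOf(address.getHostname(), address.getPort(), -1L);
  }

  public static void main(String[] args) {
    HServerAddress legacy = new HServerAddress("peer1.example.com:60020");
    System.out.println(legacy + " -> " + toServerName(legacy));
  }
}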

Test Plan: Unit tests

Reviewers: shroffrishit, aaiyer, fantasist, adela

Subscribers: eclark, zelaine.fong

Differential Revision: https://reviews.facebook.net/D29685


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c85d5e9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c85d5e9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c85d5e9

Branch: refs/heads/HBASE-12259
Commit: 6c85d5e97b284882b2ccc448933ee3867a4bbf77
Parents: eca32aa
Author: Gaurav Menghani <gauravm@fb.com>
Authored: Mon Dec 1 13:34:22 2014 +0530
Committer: Elliott Clark <eclark@apache.org>
Committed: Sat Dec 6 12:52:40 2014 -0800

----------------------------------------------------------------------
 hbase-client/pom.xml                            |   7 +
 .../org/apache/hadoop/hbase/HRegionInfo.java    |  15 +
 .../org/apache/hadoop/hbase/HConstants.java     |   7 +-
 hbase-consensus/pom.xml                         |   5 -
 .../org/apache/hadoop/hbase/HRegionInfo.java    | 838 -------------------
 .../java/org/apache/hadoop/hbase/KeyValue.java  |  15 +-
 .../hbase/consensus/client/QuorumClient.java    |   2 +-
 .../hbase/consensus/quorum/QuorumInfo.java      |   3 +
 .../rmap/GetHydraBaseRegionInfoUtil.java        |  67 --
 .../hadoop/hbase/consensus/rmap/HDFSReader.java | 140 ----
 .../hbase/consensus/rmap/LocalReader.java       |  96 ---
 .../consensus/rmap/NoSuchRMapException.java     |  10 -
 .../hadoop/hbase/consensus/rmap/Parser.java     | 146 ----
 .../hbase/consensus/rmap/RMapConfiguration.java | 330 --------
 .../hbase/consensus/rmap/RMapException.java     |  11 -
 .../hadoop/hbase/consensus/rmap/RMapJSON.java   |  34 -
 .../hadoop/hbase/consensus/rmap/RMapReader.java | 205 -----
 .../hbase/consensus/rmap/RegionLocator.java     | 142 ----
 .../consensus/server/LocalConsensusServer.java  |   7 +-
 .../hadoop/hbase/consensus/util/RaftUtil.java   |  16 +-
 .../hbase/regionserver/wal/AbstractWAL.java     |   4 +-
 .../hadoop/hbase/consensus/LocalTestBed.java    |  46 +-
 .../hadoop/hbase/consensus/RaftTestUtil.java    |  96 ++-
 .../consensus/ReplicationLoadForUnitTest.java   |  14 +-
 .../hadoop/hbase/consensus/TestBasicCommit.java |  36 +-
 .../consensus/TestBasicLeaderElection.java      |  15 +-
 .../hbase/consensus/TestBasicPeerFailure.java   |  34 +-
 .../hbase/consensus/TestBasicPeerSeeding.java   |  22 +-
 .../hbase/consensus/TestBasicPeerSlow.java      |  28 +-
 .../hbase/consensus/TestBasicQuorumCommit.java  |  26 +-
 .../TestBasicQuorumMembershipChange.java        |  27 +-
 .../consensus/TestBasicSeedCommitIndex.java     |  34 +-
 .../hbase/consensus/TestCommitDeadline.java     |  20 +-
 .../consensus/TestLowerRankBecomingLeader.java  |  28 +-
 .../consensus/TestPersistLastVotedFor.java      |  20 +-
 .../hbase/consensus/TestRaftEventListener.java  |  25 +-
 .../fsm/TestAsyncStatesInRaftStateMachine.java  |  34 +-
 .../consensus/log/TestRemoteLogFetcher.java     |  20 +-
 .../hadoop/hbase/consensus/rmap/TestParser.java |  93 --
 .../consensus/rmap/TestRMapConfiguration.java   |  55 --
 .../hbase/consensus/rmap/TestRMapReader.java    | 102 ---
 .../hbase/consensus/rmap/TestRegionLocator.java | 180 ----
 hbase-server/pom.xml                            |  10 +
 .../hadoop/hbase/consensus/rmap/HDFSReader.java | 140 ++++
 .../hbase/consensus/rmap/LocalReader.java       |  96 +++
 .../consensus/rmap/NoSuchRMapException.java     |  10 +
 .../hadoop/hbase/consensus/rmap/Parser.java     | 153 ++++
 .../hbase/consensus/rmap/RMapConfiguration.java | 270 ++++++
 .../hbase/consensus/rmap/RMapException.java     |  11 +
 .../hadoop/hbase/consensus/rmap/RMapJSON.java   |  34 +
 .../hadoop/hbase/consensus/rmap/RMapReader.java | 205 +++++
 .../hadoop/hbase/consensus/rmap/TestParser.java |  97 +++
 .../consensus/rmap/TestRMapConfiguration.java   |  55 ++
 .../hbase/consensus/rmap/TestRMapReader.java    | 102 +++
 54 files changed, 1483 insertions(+), 2755 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-client/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 5d21ea3..0159292 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -118,6 +118,13 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
     </dependency>
+    <!-- TODO enable when hbase-consensus is independent of hbase-client
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-consensus</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    -->
     <!-- General dependencies -->
     <dependency>
       <groupId>commons-codec</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 82beb0b..a423d86 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -20,9 +20,11 @@ package org.apache.hadoop.hbase;
 
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -118,6 +120,9 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
 
   private static final int MAX_REPLICA_ID = 0xFFFF;
   public static final int DEFAULT_REPLICA_ID = 0;
+
+  // Peers of the Consensus Quorum
+  // private QuorumInfo quorumInfo;
   /**
    * Does region name contain its encoded name?
    * @param regionName region name
@@ -286,6 +291,16 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     this(tableName, startKey, endKey, split, regionid, DEFAULT_REPLICA_ID);
   }
 
+  public HRegionInfo(final TableName tableName, final byte[] startKey,
+                     final byte[] endKey, final boolean split, final long regionid,
+                     final Map<String, Map<ServerName, Integer>> peers,
+                     final Map<String, InetSocketAddress[]> favoredNodesMap)
+    throws IllegalArgumentException {
+    this(tableName, startKey, endKey, split, regionid);
+    // TODO @gauravm
+    // Set QuorumInfo
+  }
+
   /**
    * Construct HRegionInfo with explicit parameters
    *

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 6001767..81053c9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1097,7 +1097,12 @@ public final class HConstants {
       600000;
 
   public static final String HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL =
-      "hbase.client.fast.fail.interceptor.impl"; 
+      "hbase.client.fast.fail.interceptor.impl";
+
+  public static final String RMAP_SUBSCRIPTION = "hbase.rmap.subscriptions";
+
+  public static final String HYDRABASE_DCNAMES = "hbase.hydrabase.dcnames";
+  public static final String HYDRABASE_DCNAME = "hbase.hydrabase.dcname";
 
   private HConstants() {
     // Can't be instantiated with this ctor.

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-consensus/pom.xml b/hbase-consensus/pom.xml
index 6a048eb..ad541f6 100644
--- a/hbase-consensus/pom.xml
+++ b/hbase-consensus/pom.xml
@@ -224,11 +224,6 @@
       <version>${swift.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.json</groupId>
-      <artifactId>json</artifactId>
-      <version>20090211</version>
-    </dependency>
-    <dependency>
       <groupId>commons-httpclient</groupId>
       <artifactId>commons-httpclient</artifactId>
       <version>3.1</version>

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
deleted file mode 100644
index a46e187..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ /dev/null
@@ -1,838 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.*;
-import java.util.Map.Entry;
-
-import com.google.common.base.Joiner;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
-import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JenkinsHash;
-import org.apache.hadoop.hbase.util.MD5Hash;
-import org.apache.hadoop.io.VersionedWritable;
-import org.apache.hadoop.io.WritableComparable;
-
-/**
- * HRegion information.
- * Contains HRegion id, start and end keys, a reference to this
- * HRegions' table descriptor, etc.
- */
-public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>{
-  private static final byte VERSION = 0;
-  private static final Log LOG = LogFactory.getLog(HRegionInfo.class);
-  protected Map<String, InetSocketAddress[]> favoredNodesMap = new HashMap<>();
-
-  /**
-   * The new format for a region name contains its encodedName at the end.
-   * The encoded name also serves as the directory name for the region
-   * in the filesystem.
-   *
-   * New region name format:
-   *    &lt;tablename>,,&lt;startkey>,&lt;regionIdTimestamp>.&lt;encodedName>.
-   * where,
-   *    &lt;encodedName> is a hex version of the MD5 hash of
-   *    &lt;tablename>,&lt;startkey>,&lt;regionIdTimestamp>
-   *
-   * The old region name format:
-   *    &lt;tablename>,&lt;startkey>,&lt;regionIdTimestamp>
-   * For region names in the old format, the encoded name is a 32-bit
-   * JenkinsHash integer value (in its decimal notation, string form).
-   *<p>
-   * **NOTE**
-   *
-   * ROOT, the first META region, and regions created by an older
-   * version of HBase (0.20 or prior) will continue to use the
-   * old region name format.
-   */
-
-  /** Separator used to demarcate the encodedName in a region name
-   * in the new format. See description on new format above.
-   */
-  private static final int ENC_SEPARATOR = '.';
-  public  static final int MD5_HEX_LENGTH   = 32;
-
-  /**
-   * Does region name contain its encoded name?
-   * @param regionName region name
-   * @return boolean indicating if this a new format region
-   *         name which contains its encoded name.
-   */
-  private static boolean hasEncodedName(final byte[] regionName) {
-    // check if region name ends in ENC_SEPARATOR
-    if ((regionName.length >= 1)
-        && (regionName[regionName.length - 1] == ENC_SEPARATOR)) {
-      // region name is new format. it contains the encoded name.
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * @param regionName
-   * @return the encodedName
-   */
-  public static String encodeRegionName(final byte [] regionName) {
-    String encodedName;
-    if (hasEncodedName(regionName)) {
-      // region is in new format:
-      // <tableName>,<startKey>,<regionIdTimeStamp>/encodedName/
-      encodedName = Bytes.toString(regionName,
-          regionName.length - MD5_HEX_LENGTH - 1,
-          MD5_HEX_LENGTH);
-    } else {
-      // old format region name. ROOT and first META region also
-      // use this format.EncodedName is the JenkinsHash value.
-      int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName,
-                                                            regionName.length,
-                                                            0));
-      encodedName = String.valueOf(hashVal);
-    }
-    return encodedName;
-  }
-
-  /** delimiter used between portions of a region name */
-  public static final int DELIMITER = ',';
-
-  /** HRegionInfo for root region */
-  public static final HRegionInfo ROOT_REGIONINFO =
-    new HRegionInfo(0L, HTableDescriptor.ROOT_TABLEDESC);
-
-  /** Encoded name for the root region. This is always the same. */
-  public static final String ROOT_REGION_ENCODED_NAME_STR =
-      HRegionInfo.ROOT_REGIONINFO.getEncodedName();
-
-  /** HRegionInfo for first meta region */
-  public static final HRegionInfo FIRST_META_REGIONINFO =
-    new HRegionInfo(1L, HTableDescriptor.META_TABLEDESC);
-
-  private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
-  private boolean offLine = false;
-  private long regionId = -1;
-  private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY;
-  private String regionNameStr = "";
-  private boolean split = false;
-  private byte [] splitPoint = null;
-  private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
-  protected HTableDescriptor tableDesc = null;
-  private int hashCode = -1;
-  //TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
-  public static final String NO_HASH = null;
-  private volatile String encodedName = NO_HASH;
-
-  // Peers of the Consensus Quorum
-  private QuorumInfo quorumInfo;
-  // For compatability with non-hydrabase mode
-  public static String LOCAL_DC_KEY = "LOCAL_DC_KEY_FOR_NON_HYDRABASE_MODE";
-
-  private void setHashCode() {
-    int result = Arrays.hashCode(this.regionName);
-    result ^= this.regionId;
-    result ^= Arrays.hashCode(this.startKey);
-    result ^= Arrays.hashCode(this.endKey);
-    result ^= Boolean.valueOf(this.offLine).hashCode();
-    result ^= this.tableDesc.hashCode();
-    this.hashCode = result;
-  }
-
-  /**
-   * Private constructor used constructing HRegionInfo for the catalog root and
-   * first meta regions
-   */
-  private HRegionInfo(long regionId, HTableDescriptor tableDesc) {
-    super();
-    this.regionId = regionId;
-    this.tableDesc = tableDesc;
-
-    // Note: Root & First Meta regions names are still in old format
-    this.regionName = createRegionName(tableDesc.getName(), null,
-                                       regionId, false);
-    this.regionNameStr = Bytes.toStringBinary(this.regionName);
-    setHashCode();
-  }
-
-  /** Default constructor - creates empty object */
-  public HRegionInfo() {
-    super();
-    this.tableDesc = new HTableDescriptor();
-  }
-
-  /**
-   * Construct HRegionInfo with explicit parameters
-   *
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @throws IllegalArgumentException
-   */
-  public HRegionInfo(final HTableDescriptor tableDesc, final byte [] startKey,
-      final byte [] endKey)
-  throws IllegalArgumentException {
-    this(tableDesc, startKey, endKey, false);
-  }
-
-  /**
-   * Construct HRegionInfo with explicit parameters
-   *
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @param split true if this region has split and we have daughter regions
-   * regions that may or may not hold references to this region.
-   * @throws IllegalArgumentException
-   */
-  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
-      final byte [] endKey, final boolean split)
-  throws IllegalArgumentException {
-    this(tableDesc, startKey, endKey, split, System.currentTimeMillis());
-  }
-
-  /**
-   * Construct HRegionInfo with explicit parameters
-   *
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @param split true if this region has split and we have daughter regions
-   * regions that may or may not hold references to this region.
-   * @param regionid Region id to use.
-   * @throws IllegalArgumentException
-   */
-  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
-    final byte [] endKey, final boolean split, final long regionid)
-  throws IllegalArgumentException {
-    this(tableDesc, startKey, endKey, split, regionid, null, null);
-  }
-
-  /**
-   * Construct HRegionInfo with explicit parameters
-   *
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @param split true if this region has split and we have daughter regions
-   * regions that may or may not hold references to this region.
-   * @param regionid Region id to use.
-   * @throws IllegalArgumentException
-   */
-  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
-    final byte [] endKey, final boolean split, final long regionid,
-      final Map<String, Map<HServerAddress, Integer>> peers,
-    final Map<String, InetSocketAddress[]> favoredNodesMap)
-  throws IllegalArgumentException {
-    super();
-    if (tableDesc == null) {
-      throw new IllegalArgumentException("tableDesc cannot be null");
-    }
-    this.offLine = false;
-    this.regionId = regionid;
-    this.regionName = createRegionName(tableDesc.getName(), startKey, regionId,
-        !tableDesc.isMetaRegion());
-    this.regionNameStr = Bytes.toStringBinary(this.regionName);
-    this.split = split;
-    this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
-    this.startKey = startKey == null?
-      HConstants.EMPTY_START_ROW: startKey.clone();
-    this.tableDesc = tableDesc;
-    this.quorumInfo = new QuorumInfo(peers, getEncodedName());
-    this.favoredNodesMap = favoredNodesMap == null ?
-            new HashMap<String, InetSocketAddress[]>() : favoredNodesMap;
-    setHashCode();
-  }
-
-  /**
-   * Costruct a copy of another HRegionInfo
-   *
-   * @param other
-   */
-  public HRegionInfo(HRegionInfo other) {
-    super();
-    this.endKey = other.getEndKey();
-    this.offLine = other.isOffline();
-    this.regionId = other.getRegionId();
-    this.regionName = other.getRegionName();
-    this.regionNameStr = Bytes.toStringBinary(this.regionName);
-    this.split = other.isSplit();
-    this.startKey = other.getStartKey();
-    this.tableDesc = other.getTableDesc();
-    this.hashCode = other.hashCode();
-    this.encodedName = other.getEncodedName();
-    this.quorumInfo = other.quorumInfo;
-    this.favoredNodesMap = other.favoredNodesMap;
-  }
-
-  private static byte [] createRegionName(final byte [] tableName,
-      final byte [] startKey, final long regionid, boolean newFormat) {
-    return createRegionName(tableName, startKey, Long.toString(regionid), newFormat);
-  }
-
-  /**
-   * Make a region name of passed parameters.
-   * @param tableName
-   * @param startKey Can be null
-   * @param id Region id.
-   * @param newFormat should we create the region name in the new format
-   *                  (such that it contains its encoded name?).
-   * @return Region name made of passed tableName, startKey and id
-   */
-  public static byte [] createRegionName(final byte [] tableName,
-      final byte [] startKey, final String id, boolean newFormat) {
-    return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat);
-  }
-  /**
-   * Make a region name of passed parameters.
-   * @param tableName
-   * @param startKey Can be null
-   * @param id Region id
-   * @param newFormat should we create the region name in the new format
-   *                  (such that it contains its encoded name?).
-   * @return Region name made of passed tableName, startKey and id
-   */
-  public static byte [] createRegionName(final byte [] tableName,
-      final byte [] startKey, final byte [] id, boolean newFormat) {
-    byte [] b = new byte [tableName.length + 2 + id.length +
-       (startKey == null? 0: startKey.length) +
-       (newFormat ? (MD5_HEX_LENGTH + 2) : 0)];
-
-    int offset = tableName.length;
-    System.arraycopy(tableName, 0, b, 0, offset);
-    b[offset++] = DELIMITER;
-    if (startKey != null && startKey.length > 0) {
-      System.arraycopy(startKey, 0, b, offset, startKey.length);
-      offset += startKey.length;
-    }
-    b[offset++] = DELIMITER;
-    System.arraycopy(id, 0, b, offset, id.length);
-    offset += id.length;
-
-    if (newFormat) {
-      //
-      // Encoded name should be built into the region name.
-      //
-      // Use the region name thus far (namely, <tablename>,<startKey>,<id>)
-      // to compute a MD5 hash to be used as the encoded name, and append
-      // it to the byte buffer.
-      //
-      String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset);
-      byte [] md5HashBytes = Bytes.toBytes(md5Hash);
-
-      if (md5HashBytes.length != MD5_HEX_LENGTH) {
-        LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH +
-                  "; Got=" + md5HashBytes.length);
-      }
-
-      // now append the bytes '.<encodedName>.' to the end
-      b[offset++] = ENC_SEPARATOR;
-      System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH);
-      offset += MD5_HEX_LENGTH;
-      b[offset++] = ENC_SEPARATOR;
-    }
-
-    return b;
-  }
-
-  /**
-   * Separate elements of a regionName.
-   * @param regionName
-   * @return Array of byte[] containing tableName, startKey and id
-   * @throws IOException
-   */
-  public static byte [][] parseRegionName(final byte [] regionName)
-  throws IOException {
-    int offset = -1;
-    for (int i = 0; i < regionName.length; i++) {
-      if (regionName[i] == DELIMITER) {
-        offset = i;
-        break;
-      }
-    }
-    if(offset == -1) {
-      throw new IOException("Invalid regionName format: " +
-                            Bytes.toStringBinary(regionName));
-    }
-    byte [] tableName = new byte[offset];
-    System.arraycopy(regionName, 0, tableName, 0, offset);
-    offset = -1;
-    for (int i = regionName.length - 1; i > 0; i--) {
-      if(regionName[i] == DELIMITER) {
-        offset = i;
-        break;
-      }
-    }
-    if(offset == -1) {
-      throw new IOException("Invalid regionName format: " +
-                            Bytes.toStringBinary(regionName));
-    }
-    byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
-    if(offset != tableName.length + 1) {
-      startKey = new byte[offset - tableName.length - 1];
-      System.arraycopy(regionName, tableName.length + 1, startKey, 0,
-          offset - tableName.length - 1);
-    }
-    byte [] id = new byte[regionName.length - offset - 1];
-    System.arraycopy(regionName, offset + 1, id, 0,
-        regionName.length - offset - 1);
-    byte [][] elements = new byte[3][];
-    elements[0] = tableName;
-    elements[1] = startKey;
-    elements[2] = id;
-    return elements;
-  }
-
-  /** @return the regionId */
-  public long getRegionId(){
-    return regionId;
-  }
-
-  /**
-   * @return the regionName as an array of bytes.
-   * @see #getRegionNameAsString()
-   */
-  public byte [] getRegionName(){
-    return regionName;
-  }
-
-  /**
-   * @return Region name as a String for use in logging, etc.
-   */
-  public String getRegionNameAsString() {
-    if (hasEncodedName(this.regionName)) {
-      // new format region names already have their encoded name.
-      return this.regionNameStr;
-    }
-
-    // old format. regionNameStr doesn't have the region name.
-    //
-    //
-    return this.regionNameStr + "." + this.getEncodedName();
-  }
-
-  /** @return the encoded region name */
-  public synchronized String getEncodedName() {
-    if (this.encodedName == NO_HASH) {
-      this.encodedName = encodeRegionName(this.regionName);
-    }
-    return this.encodedName;
-  }
-
-  /** @return the startKey */
-  public byte [] getStartKey(){
-    return startKey;
-  }
-
-  /** @return the endKey */
-  public byte [] getEndKey(){
-    return endKey;
-  }
-
-  /**
-   * Returns true if the given inclusive range of rows is fully contained
-   * by this region. For example, if the region is foo,a,g and this is
-   * passed ["b","c"] or ["a","c"] it will return true, but if this is passed
-   * ["b","z"] it will return false.
-   * @throws IllegalArgumentException if the range passed is invalid (ie end < start)
-   */
-  public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
-    if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
-      throw new IllegalArgumentException(
-      "Invalid range: " + Bytes.toStringBinary(rangeStartKey) +
-      " > " + Bytes.toStringBinary(rangeEndKey));
-    }
-
-    boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0;
-    boolean lastKeyInRange =
-      Bytes.compareTo(rangeEndKey, endKey) < 0 ||
-      Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
-    return firstKeyInRange && lastKeyInRange;
-  }
-
-  /**
-   * Return true if the given row falls in this region.
-   */
-  public boolean containsRow(byte[] row) {
-    return Bytes.compareTo(row, startKey) >= 0 &&
-      (Bytes.compareTo(row, endKey) < 0 ||
-       Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY));
-  }
-
-  /** @return the tableDesc */
-  public HTableDescriptor getTableDesc(){
-    return tableDesc;
-  }
-
-  /**
-   * @param newDesc new table descriptor to use
-   */
-  public void setTableDesc(HTableDescriptor newDesc) {
-    this.tableDesc = newDesc;
-  }
-
-  /** @return true if this is the root region */
-  public boolean isRootRegion() {
-    return this.tableDesc.isRootRegion();
-  }
-
-  /** @return true if this is the meta table */
-  public boolean isMetaTable() {
-    return this.tableDesc.isMetaTable();
-  }
-
-  /** @return true if this region is a meta region */
-  public boolean isMetaRegion() {
-    return this.tableDesc.isMetaRegion();
-  }
-
-  /**
-   * @return True if has been split and has daughters.
-   */
-  public boolean isSplit() {
-    return this.split;
-  }
-
-  /**
-   * @param split set split status
-   */
-  public void setSplit(boolean split) {
-    this.split = split;
-  }
-
-  /**
-   * @return point to explicitly split the region on
-   */
-  public byte[] getSplitPoint() {
-    return (this.splitPoint != null && this.splitPoint.length > 0)
-      ? this.splitPoint : null;
-  }
-
-  /**
-   * @param splitPoint set split status & position to split on
-   */
-  public void setSplitPoint(byte[] splitPoint) {
-    this.split = true;
-    this.splitPoint = splitPoint;
-  }
-
-  /**
-   * @return True if this region is offline.
-   */
-  public boolean isOffline() {
-    return this.offLine;
-  }
-
-  /**
-   * @param offLine set online - offline status
-   */
-  public void setOffline(boolean offLine) {
-    this.offLine = offLine;
-  }
-
-  /**
-   * @see java.lang.Object#toString()
-   */
-  @Override
-  public String toString() {
-    return String.format("REGION => {%s => '%s', STARTKEY => '%s', " +
-            "ENDKEY => '%s', ENCODED => %s, OFFLINE => %s, SPLIT => %s, " +
-            "TABLE => {%s}, FAVORED_NODES_MAP => {%s}}",
-            HConstants.NAME, regionNameStr, Bytes.toStringBinary(startKey),
-            Bytes.toStringBinary(endKey), getEncodedName(), isOffline(),
-            isSplit(), tableDesc.toString(),
-            favoredNodesMap != null ? prettyPrintFavoredNodesMap() : "");
-  }
-  
-  /**
-   * @see java.lang.Object#equals(java.lang.Object)
-   *
-   * TODO (arjen): this does not consider split and split point!
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null) {
-      return false;
-    }
-    if (!(o instanceof HRegionInfo)) {
-      return false;
-    }
-
-    HRegionInfo that = (HRegionInfo)o;
-    if (this.compareTo(that) != 0) {
-      return false;
-    }
-
-    if (this.quorumInfo == null && that.quorumInfo != null) {
-      return false;
-    }
-    if (this.quorumInfo != null && !this.quorumInfo.equals(that.quorumInfo)) {
-      return false;
-    }
-
-    return hasSameFavoredNodesMap(that);
-  }
-
-  /**
-   * @see java.lang.Object#hashCode()
-   */
-  @Override
-  public int hashCode() {
-    return this.hashCode;
-  }
-
-  /** @return the object version number */
-  @Override
-  public byte getVersion() {
-    return VERSION;
-  }
-
-  //
-  // Writable
-  //
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    Bytes.writeByteArray(out, endKey);
-    out.writeBoolean(offLine);
-    out.writeLong(regionId);
-    Bytes.writeByteArray(out, regionName);
-    out.writeBoolean(split);
-    if (split) {
-      Bytes.writeByteArray(out, splitPoint);
-    }
-    Bytes.writeByteArray(out, startKey);
-    tableDesc.write(out);
-    out.writeInt(hashCode);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.endKey = Bytes.readByteArray(in);
-    this.offLine = in.readBoolean();
-    this.regionId = in.readLong();
-    this.regionName = Bytes.readByteArray(in);
-    this.regionNameStr = Bytes.toStringBinary(this.regionName);
-    this.split = in.readBoolean();
-    if (this.split) {
-      this.splitPoint = Bytes.readByteArray(in);
-    }
-    this.startKey = Bytes.readByteArray(in);
-    this.tableDesc.readFields(in);
-    this.hashCode = in.readInt();
-    if (quorumInfo == null) {
-      quorumInfo = new QuorumInfo(
-        new HashMap<String, Map<HServerAddress, Integer>>(),
-        HRegionInfo.encodeRegionName(regionName));
-    }
-  }
-
-  //
-  // Comparable
-  //
-
-  public int compareTo(HRegionInfo o) {
-    if (o == null) {
-      return 1;
-    }
-
-    // Are regions of same table?
-    int result = this.tableDesc.compareTo(o.tableDesc);
-    if (result != 0) {
-      return result;
-    }
-
-    // Compare start keys.
-    result = Bytes.compareTo(this.startKey, o.startKey);
-    if (result != 0) {
-      return result;
-    }
-
-    // Compare end keys.
-    return Bytes.compareTo(this.endKey, o.endKey);
-  }
-
-  /**
-   * @return Comparator to use comparing {@link KeyValue}s.
-   */
-  public KVComparator getComparator() {
-    return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()?
-      KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
-  }
-
-  public Map<HServerAddress,Integer> getPeersWithRank() {
-    return getQuorumInfo().getPeersWithRank();
-  }
-
-  public Map<HServerAddress, String> getPeersWithCluster() {
-    return getQuorumInfo().getPeersWithCluster();
-  }
-
-  @Deprecated
-  public InetSocketAddress[] getFavoredNodes() {
-    return getFavoredNodes(LOCAL_DC_KEY);
-  }
-
-  public InetSocketAddress[] getFavoredNodes(String dcKey) {
-    return this.favoredNodesMap != null?
-              this.favoredNodesMap.get(dcKey):
-                null;
-  }
-
-  @Deprecated
-  public void setFavoredNodes(InetSocketAddress[] favoredNodes) {
-    setFavoredNodes(LOCAL_DC_KEY, favoredNodes);
-  }
-
-  public void setFavoredNodes(String dcName, InetSocketAddress[] favoredNodes) {
-    if (this.favoredNodesMap == null) {
-      this.favoredNodesMap = new HashMap<>();
-    }
-    this.favoredNodesMap.put(dcName, favoredNodes);
-    setHashCode();
-  }
-
-  public void setPeers(Map<String, Map<HServerAddress, Integer>> peers) {
-    this.quorumInfo.setPeers(peers);
-  }
-
-  public Map<String, Map<HServerAddress, Integer>> getPeers() {
-    QuorumInfo quorumInfo = getQuorumInfo();
-    if (quorumInfo != null) {
-      return quorumInfo.getPeers();
-    }
-    return null;
-  }
-
-  public Map<String, InetSocketAddress[]> getFavoredNodesMap() {
-    return favoredNodesMap;
-  }
-
-  public void setFavoredNodesMap(
-          final Map<String, InetSocketAddress[]> favoredNodesMap) {
-    this.favoredNodesMap = favoredNodesMap;
-  }
-
-  public boolean hasSameFavoredNodesMap(final HRegionInfo that) {
-    if (that == null) {
-      return false;
-    }
-
-    if (!this.favoredNodesMap.keySet().equals(that.favoredNodesMap.keySet())) {
-      return false;
-    }
-
-    for (String domain : this.favoredNodesMap.keySet()) {
-      if (!Arrays.equals(this.favoredNodesMap.get(domain),
-              that.favoredNodesMap.get(domain))) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  public QuorumInfo getQuorumInfo() {
-    return quorumInfo;
-  }
-
-  public void setQuorumInfo(final QuorumInfo quorumInfo) {
-    this.quorumInfo = quorumInfo;
-  }
-
-  public String prettyPrintFavoredNodesMap() {
-    if (favoredNodesMap == null) {
-      return "";
-    }
-    StringBuilder sb = new StringBuilder(128);
-    Iterator<Entry<String, InetSocketAddress[]>> it
-            = favoredNodesMap.entrySet().iterator();
-    while (it.hasNext()) {
-      Map.Entry<String, InetSocketAddress[]> domain = it.next();
-      InetSocketAddress[] favoredNodes = domain.getValue();
-      sb.append(domain.getKey());
-      sb.append(" => [");
-      if (favoredNodes != null) {
-        sb.append(Joiner.on(", ").join(favoredNodes));
-      }
-      sb.append(it.hasNext() ? "], " : "]");
-    }
-    return sb.toString();
-  }
-
-  public static class MultiDCHRegionInfo extends HRegionInfo {
-    private Map<String, Map<HServerAddress, Integer>> combinedPeersMap;
-
-    public MultiDCHRegionInfo(String dcsite, HRegionInfo regionInfo) {
-      super(regionInfo);
-      this.favoredNodesMap = new HashMap<>();
-      this.favoredNodesMap.put(dcsite, regionInfo.getFavoredNodes());
-      this.combinedPeersMap = regionInfo.getPeers();
-    }
-
-    public void merge(String otherDC, HRegionInfo other) {
-      this.favoredNodesMap.put(otherDC, other.getFavoredNodes());
-    }
-
-    public void validate(int quorumSize, Map<String, Integer> maxPeersPerDC)
-      throws IllegalArgument {
-      if (favoredNodesMap.size() == 0) {
-        return;
-      }
-
-      int rankNum = quorumSize;
-      for (String cluster : maxPeersPerDC.keySet()) {
-        int numPeerAssignedPerDC = maxPeersPerDC.get(cluster).intValue();
-        if (combinedPeersMap.get(cluster) == null) {
-          combinedPeersMap.put(cluster, new HashMap
-            <HServerAddress, Integer>());
-        }
-        InetSocketAddress[] peerAddr = favoredNodesMap.get(cluster);
-        for (InetSocketAddress addr : peerAddr) {
-          this.combinedPeersMap.get(cluster).put(new HServerAddress(addr), rankNum--);
-          if (--numPeerAssignedPerDC == 0) {
-            break;
-          }
-        }
-        if (rankNum <= 0) {
-          break;
-        }
-      }
-
-      if (rankNum > 0) {
-        throw new IllegalArgument("Not enough nodes to complete the peer" +
-          " the peer assignment.");
-      }
-    }
-
-    @Override
-    public Map<String, Map<HServerAddress, Integer>> getPeers() {
-      return combinedPeersMap;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 8a9dce6..5a55683 100644
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -42,8 +42,6 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.Writable;
 
-import com.google.common.primitives.Longs;
-
 /**
  * An HBase Key/Value.
  *
@@ -82,6 +80,7 @@ public final class KeyValue implements Writable, HeapSize, Cloneable {
    * Colon character in UTF-8
    */
   public static final char COLUMN_FAMILY_DELIMITER = ':';
+  public static final int DELIMITER = ',';
 
   public static final byte[] COLUMN_FAMILY_DELIM_ARRAY =
     new byte[]{COLUMN_FAMILY_DELIMITER};
@@ -1931,11 +1930,11 @@ public final class KeyValue implements Writable, HeapSize, Cloneable {
       int lmetaOffsetPlusDelimiter = loffset + metalength;
       int leftFarDelimiter = getDelimiterInReverse(left,
           lmetaOffsetPlusDelimiter,
-          llength - metalength, HRegionInfo.DELIMITER);
+          llength - metalength, DELIMITER);
       int rmetaOffsetPlusDelimiter = roffset + metalength;
       int rightFarDelimiter = getDelimiterInReverse(right,
           rmetaOffsetPlusDelimiter, rlength - metalength,
-          HRegionInfo.DELIMITER);
+          DELIMITER);
       if (leftFarDelimiter < 0 && rightFarDelimiter >= 0) {
         // Nothing between .META. and regionid.  Its first key.
         return -1;
@@ -1986,9 +1985,9 @@ public final class KeyValue implements Writable, HeapSize, Cloneable {
       //        LOG.info("META " + Bytes.toString(left, loffset, llength) +
       //          "---" + Bytes.toString(right, roffset, rlength));
       int leftDelimiter = getDelimiter(left, loffset, llength,
-          HRegionInfo.DELIMITER);
+          DELIMITER);
       int rightDelimiter = getDelimiter(right, roffset, rlength,
-          HRegionInfo.DELIMITER);
+          DELIMITER);
       if (leftDelimiter < 0 && rightDelimiter >= 0) {
         // Nothing between .META. and regionid.  Its first key.
         return -1;
@@ -2008,10 +2007,10 @@ public final class KeyValue implements Writable, HeapSize, Cloneable {
       leftDelimiter++;
       rightDelimiter++;
       int leftFarDelimiter = getRequiredDelimiterInReverse(left, leftDelimiter,
-          llength - (leftDelimiter - loffset), HRegionInfo.DELIMITER);
+          llength - (leftDelimiter - loffset), DELIMITER);
       int rightFarDelimiter = getRequiredDelimiterInReverse(right,
           rightDelimiter, rlength - (rightDelimiter - roffset),
-          HRegionInfo.DELIMITER);
+          DELIMITER);
       // Now compare middlesection of row.
       result = super.compareRows(left, leftDelimiter,
           leftFarDelimiter - leftDelimiter, right, rightDelimiter,

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java
index 61c042d..2901a54 100644
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java
@@ -69,7 +69,7 @@ public class QuorumClient {
 
   protected QuorumClient(String regionId, final Configuration conf,
                          ExecutorService pool) throws IOException {
-    this(RaftUtil.createDummyRegionInfo(regionId).getQuorumInfo(), conf, pool);
+    this(RaftUtil.createDummyQuorumInfo(regionId), conf, pool);
   }
 
   public synchronized long replicateCommits(List<WALEdit> txns)

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java
index 66e5406..8755cbb 100644
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java
@@ -14,6 +14,9 @@ public class QuorumInfo {
           + Bytes.SIZEOF_BYTE   // Payload type
           + Bytes.SIZEOF_BYTE;  // Payload version
 
+  // For compatability with non-hydrabase mode
+  public static String LOCAL_DC_KEY = "LOCAL_DC_KEY_FOR_NON_HYDRABASE_MODE";
+
   private Map<String, Map<HServerAddress, Integer>> peers = null;
   private Map<HServerAddress, Integer> peersWithRank = null;
   private Set<String> peersAsString = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java
deleted file mode 100644
index 1dd4b03..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java
+++ /dev/null
@@ -1,67 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.log4j.Level;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
-
-/**
- * Takes comma-separated list of (full/partial) region-names and output the
- * required information about that region
- */
-public class GetHydraBaseRegionInfoUtil {
-  private static Logger LOG = LoggerFactory.getLogger(
-    GetHydraBaseRegionInfoUtil.class);
-
-  public static void main(String[] args) throws IOException, RMapException {
-
-    // Silent the noisy o/p
-    org.apache.log4j.Logger.getLogger(
-      "org.apache.zookeeper").setLevel(Level.ERROR);
-    org.apache.log4j.Logger.getLogger(
-      "org.apache.hadoop.conf.ClientConfigurationUtil").setLevel(Level.ERROR);
-    org.apache.log4j.Logger.getLogger(
-      "org.apache.hadoop.fs").setLevel(Level.ERROR);
-    org.apache.log4j.Logger.getLogger(
-      "org.apache.hadoop.util.NativeCodeLoader").setLevel(Level.ERROR);
-
-    String[] regions = args[0].split(",");
-    Configuration conf = HBaseConfiguration.create();
-    RMapConfiguration rMapConfiguration = new RMapConfiguration(conf);
-
-    Map<String, HRegionInfo> regionInfoMap = new HashMap<>();
-    List<HRegionInfo> regionInfoList;
-
-    URI uri = rMapConfiguration.getRMapSubscription(conf);
-    if (uri != null) {
-      rMapConfiguration.readRMap(uri);
-      regionInfoList = rMapConfiguration.getRegions(uri);
-      for (HRegionInfo r : regionInfoList) {
-        regionInfoMap.put(r.getEncodedName(), r);
-      }
-    }
-
-    HRegionInfo region;
-    for (String regionName : regions) {
-      if ((region = regionInfoMap.get(regionName)) != null) {
-        LOG.info(String.format("%s:[table: %s, start_key: %s, " +
-          "end_key: %s, peers: %s]", regionName,
-          region.getTableDesc().getNameAsString(),
-          Bytes.toStringBinary(region.getStartKey()),
-          Bytes.toStringBinary(region.getEndKey()),
-          region.getQuorumInfo().getPeersAsString()));
-      } else {
-        LOG.error("No region found with encoded name " + regionName);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java
deleted file mode 100644
index 7d6b0f7..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java
+++ /dev/null
@@ -1,140 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-public class HDFSReader extends RMapReader {
-  protected static final Logger LOG = LoggerFactory.getLogger(HDFSReader.class);
-
-  private Configuration conf;
-
-  public HDFSReader(final Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public List<Long> getVersions(URI uri) throws IOException {
-    Path path = new Path(getSchemeAndPath(uri));
-    FileSystem fs = path.getFileSystem(conf);
-    FileStatus[] statuses = fs.globStatus(new Path(path.toString() + ".*"));
-
-    List<Long> versions = new ArrayList<>(statuses.length);
-    for (FileStatus status : statuses) {
-      long version = getVersionFromPath(status.getPath().toString());
-      if (version > 0) {
-        versions.add(version);
-      }
-    }
-    Collections.sort(versions);
-    return versions;
-  }
-
-  @Override
-  public URI resolveSymbolicVersion(URI uri) throws URISyntaxException {
-    long version = getVersion(uri);
-    String schemeAndPath = getSchemeAndPath(uri);
-
-    if (version == RMapReader.CURRENT || version == RMapReader.NEXT) {
-      Path link = new Path(String.format("%s.%s", schemeAndPath,
-              version == RMapReader.CURRENT ? "CURRENT" : "NEXT"));
-      // Resolve to an explicit version, or UNKNOWN
-      try {
-        Path target = getLinkTarget(link);
-        version = target != null ? getVersionFromPath(target.toString()) :
-                RMapReader.UNKNOWN;
-      } catch (IOException e) {
-        LOG.error("Failed to look up version from link:", e);
-        version = RMapReader.UNKNOWN;
-      }
-    }
-
-    if (version > 0) {
-      return new URI(String.format("%s?version=%d", schemeAndPath, version));
-    }
-    return new URI(schemeAndPath);
-  }
-
-  @Override
-  public String readRMapAsString(final URI uri) throws IOException {
-    // Get file status, throws IOException if the path does not exist.
-    Path path = getPathWithVersion(uri);
-    FileSystem fs = path.getFileSystem(conf);
-    FileStatus status = fs.getFileStatus(path);
-
-    long n = status.getLen();
-    if (n < 0 || n > MAX_SIZE_BYTES) {
-      throw new IOException(String.format("Invalid RMap file size " +
-              "(expected between 0 and %d but got %d bytes)",
-              MAX_SIZE_BYTES, n));
-    }
-
-    byte[] buf = new byte[(int)n];
-    FSDataInputStream stream = fs.open(path);
-    try {
-      stream.readFully(buf);
-    } finally {
-      stream.close();
-    }
-    return Bytes.toString(buf);
-  }
-
-  public Path getPathWithVersion(final URI uri) throws IOException {
-    long version = RMapReader.UNKNOWN;
-    try {
-      version = getVersion(resolveSymbolicVersion(uri));
-    } catch (URISyntaxException e) {
-      // Ignore invalid URIs and assume version UNKNOWN
-    }
-
-    if (version > 0) {
-      return new Path(String.format("%s.%d", getSchemeAndPath(uri), version));
-    }
-    return new Path(uri.toString());
-  }
-
-  private long getVersionFromPath(final String path) {
-    String[] tokens = path.split("[\\.]");
-    try {
-      return Long.parseLong(tokens[tokens.length - 1]);
-    } catch (NumberFormatException e) {
-      // Skip if token not numerical
-    }
-    return RMapReader.UNKNOWN;
-  }
-
-  private Path getLinkTarget(final Path path) throws IOException {
-    FileSystem fs = path.getFileSystem(conf);
-
-    // The getHardLinkedFiles call is a bit tricky, as it effectively returns
-    // all other paths to the inode shared with the given path. In order to
-    // guard against erroneous links, only consider those where the paths
-    // are the same, up to the version.
-    String pathWithoutVersion = path.toString().substring(0,
-            path.toString().lastIndexOf('.'));
-    /*
-TODO: FIXME: Amit: this code works with the internal hdfs. might not work with the
-OSS version.
-
-    for (String link : fs.getHardLinkedFiles(path)) {
-      if (path.toString().startsWith(pathWithoutVersion) &&
-              getVersionFromPath(link) > 0) {
-        return new Path(link);
-      }
-    }
-    */
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java
deleted file mode 100644
index fc1e877..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java
+++ /dev/null
@@ -1,96 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-public class LocalReader extends RMapReader {
-  protected static final Logger LOG = LoggerFactory.getLogger(
-          LocalReader.class);
-
-  @Override
-  public List<Long> getVersions(final URI uri) throws IOException {
-    Path path = Paths.get(uri);
-    List<Long> versions = new ArrayList<>();
-
-    for (Path match : Files.newDirectoryStream(path.getParent(),
-            path.getFileName() + ".*")) {
-      long version = getVersionFromPath(match.toString());
-      if (version > 0) {
-        versions.add(version);
-      }
-    }
-    Collections.sort(versions);
-    return versions;
-  }
-
-  @Override
-  public URI resolveSymbolicVersion(URI uri) throws URISyntaxException {
-    long version = getVersion(uri);
-    String schemeAndPath = getSchemeAndPath(uri);
-
-    if (version == RMapReader.CURRENT || version == RMapReader.NEXT) {
-      Path link = Paths.get(String.format("%s.%s", schemeAndPath,
-              version == RMapReader.CURRENT ? "CURRENT" : "NEXT"));
-      // Resolve to an explicit version, or UNKNOWN
-      try {
-        version = getVersionFromPath(Files.readSymbolicLink(link).toString());
-      } catch (IOException e) {
-        LOG.error("Failed to look up version from link:", e);
-        version = RMapReader.UNKNOWN;
-      }
-    }
-
-    if (version > 0) {
-      return new URI(String.format("%s?version=%d", schemeAndPath, version));
-    }
-    return new URI(schemeAndPath);
-  }
-
-  @Override
-  public String readRMapAsString(final URI uri) throws IOException {
-    Path path = getPathWithVersion(uri);
-
-    long n = Files.size(path);
-    if (n < 0 || n > MAX_SIZE_BYTES) {
-      throw new IOException(String.format("Invalid RMap file size " +
-              "(expected between 0 and %d but got %d bytes)",
-              MAX_SIZE_BYTES, n));
-    }
-
-    return new String(Files.readAllBytes(path));
-  }
-
-  private long getVersionFromPath(final String path) {
-    String[] tokens = path.split("[\\.]");
-    try {
-      return Long.parseLong(tokens[tokens.length - 1]);
-    } catch (NumberFormatException e) {
-      // Skip if token not numerical
-    }
-    return RMapReader.UNKNOWN;
-  }
-
-  private Path getPathWithVersion(final URI uri) {
-    long version = RMapReader.UNKNOWN;
-    try {
-      version = getVersion(resolveSymbolicVersion(uri));
-    } catch (URISyntaxException e) {
-      // Ignore invalid URIs and assume version UNKNOWN
-    }
-
-    if (version > 0) {
-      return Paths.get(String.format("%s.%d", uri.getPath(), version));
-    }
-    return Paths.get(uri);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java
deleted file mode 100644
index 6136063..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java
+++ /dev/null
@@ -1,10 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import java.io.IOException;
-import java.net.URI;
-
-public class NoSuchRMapException extends IOException {
-  public NoSuchRMapException(final URI uri) {
-    super("No RMap found with URI " + uri);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
deleted file mode 100644
index f345b1a..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
+++ /dev/null
@@ -1,146 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-import java.net.InetSocketAddress;
-import java.util.*;
-
-public class Parser {
-  private Configuration conf;
-
-  public Parser(final Configuration conf) {
-    this.conf = conf;
-  }
-
-  public List<HRegionInfo> parseEncodedRMap(JSONObject encodedRMap)
-          throws JSONException {
-    List<HRegionInfo> regions = new ArrayList<>();
-    JSONObject tables = encodedRMap.getJSONObject("tables");
-
-    for (Iterator<String> names = tables.keys(); names.hasNext();) {
-      String name = names.next();
-      regions.addAll(parseTable(name, tables.getJSONObject(name)));
-    }
-
-    return regions;
-  }
-
-  public List<HRegionInfo> parseTable(String name, JSONObject table)
-          throws JSONException {
-    HTableDescriptor tableDesc = new HTableDescriptor(name);
-    List<HRegionInfo> regions = Collections.emptyList();
-    Iterator<String> keys = table.keys();
-    while (keys.hasNext()) {
-      String key = keys.next();
-      if (key.equals("families")) {
-        JSONObject families = table.getJSONObject(key);
-        Iterator<String> familyKeys = families.keys();
-        while (familyKeys.hasNext()) {
-          String familyName = familyKeys.next();
-          JSONObject familyJson = families.getJSONObject(familyName);
-          tableDesc.addFamily(parseFamily(familyName, familyJson));
-        }
-      } else if (key.equals("regions")) {
-        JSONArray regionsJson = table.getJSONArray(key);
-        int length = regionsJson.length();
-        regions = new ArrayList<>(length);
-        for (int i = 0; i < length; ++i) {
-          regions.add(parseRegion(tableDesc, regionsJson.getJSONObject(i)));
-        }
-      } else {
-        String value = table.get(key).toString();
-        tableDesc.setValue(key, value);
-      }
-    }
-    return regions;
-  }
-
-  public HColumnDescriptor parseFamily(String name, JSONObject family)
-          throws JSONException {
-    HColumnDescriptor columnDesc = new HColumnDescriptor();
-    columnDesc.setName(Bytes.toBytes(name));
-    Iterator<String> keys = family.keys();
-    while (keys.hasNext()) {
-      String key = keys.next();
-      String value = family.get(key).toString();
-      columnDesc.setValue(key, value);
-    }
-    return columnDesc;
-  }
-
-  public HRegionInfo parseRegion(HTableDescriptor table, JSONObject region)
-          throws JSONException {
-    long id = region.getLong("id");
-    byte[] startKey = Bytes.toBytes(region.getString("start_key"));
-    byte[] endKey = Bytes.toBytes(region.getString("end_key"));
-    Map<String, Map<HServerAddress, Integer>> peers = parsePeers(region
-            .getJSONObject("peers"));
-    Map<String, InetSocketAddress[]> favoredNodesMap = parseFavoredNodesMap(region
-            .getJSONObject("favored_nodes"));
-    return new HRegionInfo(table, startKey, endKey, false, id, peers,
-            favoredNodesMap);
-  }
-
-  public Map<String, Map<HServerAddress, Integer>> parsePeers(JSONObject peersJson)
-          throws JSONException {
-    Map<String, Map<HServerAddress, Integer>> peers = new LinkedHashMap<>();
-    Iterator<String> keys = peersJson.keys();
-    while (keys.hasNext()) {
-      String cellName = keys.next();
-      JSONArray peersWithRank = peersJson.getJSONArray(cellName);
-      peers.put(cellName, parsePeersWithRank(peersWithRank));
-    }
-    return peers;
-  }
-
-  public Map<HServerAddress, Integer> parsePeersWithRank(JSONArray peersJson)
-          throws JSONException {
-    Map<HServerAddress, Integer> peers = new LinkedHashMap<HServerAddress, Integer>();
-    for (int i = 0; i < peersJson.length(); ++i) {
-      String peer = peersJson.getString(i);
-      int colonIndex = peer.lastIndexOf(':');
-      peers.put(new HServerAddress(peer.substring(0, colonIndex)),
-              Integer.valueOf(peer.substring(colonIndex + 1)));
-    }
-    return peers;
-  }
-
-  Map<String, InetSocketAddress[]> parseFavoredNodesMap(JSONObject favoredNodesJson)
-          throws JSONException {
-    Iterator<String> keys = favoredNodesJson.keys();
-
-    HashMap<String, InetSocketAddress[]> favoredNodesMap = new HashMap<>();
-    while (keys.hasNext()) {
-      String cellName = keys.next();
-      JSONArray peersWithRank = favoredNodesJson.getJSONArray(cellName);
-      favoredNodesMap.put(cellName, parseFavoredNodes(peersWithRank));
-    }
-    return favoredNodesMap;
-  }
-
-  public InetSocketAddress[] parseFavoredNodes(JSONArray favoredNodesInCell)
-          throws JSONException {
-    if (favoredNodesInCell == null) {
-      return null;
-    } else {
-      int length = favoredNodesInCell.length();
-      InetSocketAddress[] favoredNodes = new InetSocketAddress[length];
-      for (int i = 0; i < length; ++i) {
-        String node = favoredNodesInCell.getString(i);
-        int colonIndex = node.lastIndexOf(':');
-        favoredNodes[i] = new InetSocketAddress(node.substring(0, colonIndex),
-                Integer.parseInt(node.substring(colonIndex + 1)));
-
-      }
-      return favoredNodes;
-    }
-  }
-}
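
For context, the Parser being moved expected an encoded RMap of the shape tables -> table name -> {families, regions}, with each region carrying id, start_key, end_key, per-cell peers ("host:port:rank") and per-cell favored_nodes ("host:port"). A minimal sketch of feeding it such a document, assuming the pre-removal hbase-consensus classes (Parser and its HRegionInfo variant) are on the classpath; the table, family and host names are made up:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.consensus.rmap.Parser;
    import org.json.JSONObject;

    public class ParserSketch {
      public static void main(String[] args) throws Exception {
        // Single table "t1" with one family and one region spanning the whole key space.
        String json =
            "{\"tables\":{\"t1\":{"
            + "\"families\":{\"cf1\":{\"BLOCKSIZE\":\"65536\"}},"
            + "\"regions\":[{\"id\":1,\"start_key\":\"\",\"end_key\":\"\","
            + "\"peers\":{\"cell1\":[\"host1:60020:1\"]},"
            + "\"favored_nodes\":{\"cell1\":[\"host1:60020\"]}}]}}}";
        List<HRegionInfo> regions =
            new Parser(new Configuration()).parseEncodedRMap(new JSONObject(json));
        System.out.println(regions.size());   // prints 1
      }
    }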

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
deleted file mode 100644
index 00306dc..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
+++ /dev/null
@@ -1,330 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.json.JSONException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-public class RMapConfiguration {
-  private static final Logger LOG = LoggerFactory.getLogger(RMapConfiguration.class);
-
-  private Configuration conf;
-
-  private Map<String, RMap> appliedRMaps;
-  private Map<URI, RMap> cachedRMaps;
-
-  public RMapConfiguration(final Configuration conf) {
-    this.conf = conf;
-    this.appliedRMaps = new HashMap<>();
-    this.cachedRMaps = new HashMap<>();
-  }
-
-  public static URI getRMapSubscription(final Configuration conf) {
-    String[] subscriptionsList =
-      conf.get(HConstants.RMAP_SUBSCRIPTION, "").split(",");
-    if (subscriptionsList.length >= 1) {
-      if (subscriptionsList.length > 1) {
-        LOG.warn(String.format("We do not support multiple RMaps. " +
-          "Using the first RMap as the correct one: %s", subscriptionsList[0]));
-      }
-      else if (!subscriptionsList[0].equals("")) {
-        try {
-          return new URI(subscriptionsList[0]);
-        } catch (URISyntaxException e) {
-          LOG.warn(String.format("Failed to parse URI for subscription %s: ",
-            subscriptionsList[0]), e);
-        }
-      }
-    }
-    return null;
-  }
-
-  public static RMapReader getRMapReader(final Configuration conf,
-          final URI uri) throws RMapException {
-    switch (uri.getScheme()) {
-      case "file":
-        return new LocalReader();
-      case "hdfs":
-        return new HDFSReader(conf);
-      default:
-        throw new RMapException("No reader found for RMap: " + uri);
-    }
-  }
-
-  public synchronized RMap getRMap(URI uri)
-      throws IOException, RMapException {
-    return getRMap(uri, false);
-  }
-
-  public synchronized RMap getRMap(URI uri, boolean reload)
-      throws IOException, RMapException {
-    try {
-      RMapReader reader = getRMapReader(conf, uri);
-      URI nonSymbolicURI = reader.resolveSymbolicVersion(uri);
-      // Try to get a cached instance of the RMap.
-      RMap rmap = cachedRMaps.get(nonSymbolicURI);
-      if (reload || rmap == null) {
-        // No cached instance was found, read it using the reader.
-        RMapJSON encodedRMap = reader.readRMap(nonSymbolicURI);
-        rmap = new RMap(encodedRMap.uri,
-            new Parser(conf).parseEncodedRMap(encodedRMap.getEncodedRMap()),
-            encodedRMap.signature);
-        cachedRMaps.put(rmap.uri, rmap);
-      }
-      return rmap;
-    } catch (URISyntaxException e) {
-      throw new RMapException("URI syntax invalid for RMap: " + uri, e);
-    } catch (JSONException e) {
-      throw new RMapException("Failed to decode JSON for RMap: " + uri, e);
-    }
-  }
-
-  /**
-   * Reads and caches the RMap from the given URI and returns its signature.
-   *
-   * @param uri URI of the RMap
-   * @return the signature of the RMap
-   */
-  public synchronized String readRMap(final URI uri) throws IOException,
-          RMapException {
-    return getRMap(uri).signature;
-  }
-
-  public synchronized String readRMap(URI uri, boolean reload)
-      throws IOException, RMapException {
-    return getRMap(uri, reload).signature;
-  }
-
-  /**
-   * Get the list of regions which need to be updated in order to transition
-   * to the version of the RMap identified by the given URI.
-   *
-   * @param uri URI of the RMap version to transition to
-   * @return the regions which differ from the currently applied RMap
-   */
-  public synchronized Collection<HRegionInfo> getTransitionDelta(final URI uri)
-          throws IOException, RMapException {
-    RMap nextRMap = getRMap(uri);
-    RMap currentRMap = appliedRMaps.get(RMapReader.getSchemeAndPath(uri));
-
-    // The standard Set implementations use compareTo() for their operations,
-    // but on HRegionInfo objects compareTo() and equals() have different
-    // semantics and equals() is what is needed here. What follows is a poor
-    // man's Set comparison to determine which regions need to be modified to
-    // make the RMap transition.
-    if (nextRMap != null) {
-      HashMap<String, HRegionInfo> delta = new HashMap<>();
-      for (HRegionInfo next : nextRMap.regions) {
-        delta.put(next.getEncodedName(), next);
-      }
-
-      if (currentRMap != null) {
-        // Remove all regions already present in the current RMap from the
-        // delta. This should use the {@link HRegionInfo.equals} method as it
-        // should consider the favored nodes and replicas.
-        for (HRegionInfo current : currentRMap.regions) {
-          HRegionInfo next = delta.get(current.getEncodedName());
-          if (next != null) {
-            if (next.equals(current)) {
-              delta.remove(next.getEncodedName());
-            }
-          }
-        }
-      }
-
-      return delta.values();
-    }
-
-    return Collections.emptyList();
-  }
-
-  public synchronized void appliedRMap(final URI uri) throws IOException,
-          RMapException {
-    RMap previous = appliedRMaps.put(RMapReader.getSchemeAndPath(uri),
-        getRMap(uri));
-    // Purge the earlier version of the RMap from cache.
-    if (previous != null) {
-      cachedRMaps.remove(previous.uri);
-    }
-  }
-
-  public synchronized boolean isRMapApplied(final URI uri) {
-    RMap active = appliedRMaps.get(RMapReader.getSchemeAndPath(uri));
-    if (active != null) {
-      return active.uri.equals(uri);
-    }
-    return false;
-  }
-
-  public synchronized RMap getAppliedRMap(String uri) {
-    return appliedRMaps.get(uri);
-  }
-
-  public synchronized List<HRegionInfo> getRegions(final URI uri)
-          throws IOException, RMapException {
-    RMap rmap = getRMap(uri);
-    if (rmap == null) {
-      return Collections.emptyList();
-    }
-    return Collections.unmodifiableList(rmap.regions);
-  }
-
-  public synchronized void clearFromRMapCache(URI uri) {
-    cachedRMaps.remove(uri);
-  }
-
-  /**
-   * Replace the content of a cached RMap. For testing only!
-   *
-   * @param uri URI under which to cache the RMap
-   * @param rMap RMap to cache and mark as applied
-   */
-  public synchronized void cacheCustomRMap(URI uri, RMap rMap) {
-    cachedRMaps.put(uri, rMap);
-    appliedRMaps.put(uri.toString(), rMap);
-  }
-
-  public class RMap {
-    public final URI uri;
-    public final List<HRegionInfo> regions;
-    public final String signature;
-
-    RMap(final URI uri, final List<HRegionInfo> regions,
-         final String signature) {
-      this.uri = uri;
-      this.regions = regions;
-      this.signature = signature;
-    }
-
-    /**
-     * Return the quorum size used by the regions in the RMap.
-     * @return the quorum size, or 0 if the RMap contains no regions
-     */
-    public int getQuorumSize() {
-      if (regions.size() == 0) {
-        return 0;
-      }
-      return regions.get(0).getQuorumInfo().getQuorumSize();
-    }
-
-    /**
-     * Return the list of regions that are served by the specified server.
-     * @param hServerAddress address of the server
-     * @return the regions which list the server as one of their peers
-     */
-    public List<HRegionInfo> getRegionsForServer(HServerAddress hServerAddress) {
-      List<HRegionInfo> ret = new ArrayList<HRegionInfo>();
-      for (HRegionInfo region: regions) {
-        if (region.getPeersWithRank().containsKey(hServerAddress)) {
-          ret.add(region);
-        }
-      }
-      return ret;
-    }
-
-    /**
-     * Returns the set of servers that host any of the regions in the RMap.
-     * @return the set of peer server addresses
-     */
-    public Set<HServerAddress> getAllServers() {
-      Set<HServerAddress> ret = new HashSet<>();
-      for (HRegionInfo region: regions) {
-        ret.addAll(region.getPeersWithRank().keySet());
-      }
-      return ret;
-    }
-
-    /**
-     * Create a customized RMap. For test use only!
-     *
-     * @param uri URI of the new RMap, or null to keep the current one
-     * @param regions regions of the new RMap, or null to keep the current ones
-     * @param signature signature of the new RMap, or null to keep the current one
-     * @return a new RMap with the given fields overridden
-     */
-    public RMap createCustomizedRMap(URI uri,
-                                     List<HRegionInfo> regions,
-                                     String signature) {
-      return new RMapConfiguration.RMap(
-          uri == null ? this.uri : uri,
-          regions == null ? this.regions : regions,
-          signature == null ? this.signature : signature
-      );
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj == null || !(obj instanceof RMap)) {
-        return false;
-      }
-      RMap that = (RMap)obj;
-      if (this.regions == null || that.regions == null || this.regions.size() != that.regions.size()) {
-        return false;
-      }
-      Set<HRegionInfo> regionInfos = new TreeSet<>();
-      regionInfos.addAll(regions);
-      for (HRegionInfo region : that.regions) {
-        if (!regionInfos.contains(region)) {
-          return false;
-        }
-        regionInfos.remove(region);
-      }
-      return regionInfos.isEmpty();
-    }
-  }
-
-  /**
-   * Creates a temporary name for an RMap, based on the current date and time.
-   * @return a name of the form rmap.json.yyyy-MM-dd-HHmmss
-   */
-  public static String createRMapName() {
-    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd-HHmmss");
-    return "rmap.json." + format.format(System.currentTimeMillis());
-  }
-
-  /**
-   * View information about an RMap. Currently only logs its signature.
-   * @param args command line arguments; use -r to specify the RMap URI
-   */
-  public static void main(String[] args) throws ParseException,
-    URISyntaxException, RMapException, IOException {
-    Options options = new Options();
-    options.addOption("r", "rmap", true, "Name of the rmap");
-
-    CommandLineParser parser = new PosixParser();
-    CommandLine cmd = parser.parse(options, args);
-
-    if (!cmd.hasOption("r")) {
-      System.out.println("Please specify the rmap with -r");
-      return;
-    }
-
-    String rmapUriStr = cmd.getOptionValue("r");
-    RMapConfiguration conf = new RMapConfiguration(new Configuration());
-    String rmapStr = conf.readRMap(new URI(rmapUriStr));
-    LOG.debug("RMap Signature: " + rmapStr);
-  }
-}
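
For context, callers of the relocated RMapConfiguration load an RMap by URI, ask for the delta against the currently applied RMap, and then record the new RMap once the transition is done. A minimal usage sketch, assuming a local RMap file exists at the (hypothetical) path below and that the pre-move classes are on the classpath:

    import java.net.URI;
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.consensus.rmap.RMapConfiguration;

    public class RMapTransitionSketch {
      public static void main(String[] args) throws Exception {
        RMapConfiguration rmapConf = new RMapConfiguration(new Configuration());
        // The reader is chosen from the scheme: "file" -> LocalReader, "hdfs" -> HDFSReader.
        URI uri = new URI("file:///tmp/rmap.json?version=CURRENT");
        // Regions that differ from (or are missing in) the applied RMap and
        // would have to be (re)opened to complete the transition.
        Collection<HRegionInfo> delta = rmapConf.getTransitionDelta(uri);
        System.out.println("regions to update: " + delta.size());
        // Record the RMap as applied; the previously applied version is dropped from the cache.
        rmapConf.appliedRMap(uri);
      }
    }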

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
deleted file mode 100644
index 31621ab..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-public class RMapException extends Exception {
-  public RMapException(final String message) {
-    super(message);
-  }
-
-  public RMapException(final String message, final Throwable cause) {
-    super(message, cause);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
deleted file mode 100644
index 6d06123..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
+++ /dev/null
@@ -1,34 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.json.JSONObject;
-
-import java.net.URI;
-
-public class RMapJSON {
-  final URI uri;
-  final JSONObject rmap;
-  final String signature;
-
-  public RMapJSON(final URI uri, final JSONObject rmap,
-                  final String signature) {
-    this.uri = uri;
-    this.rmap = rmap;
-    this.signature = signature;
-  }
-
-  public long getVersion() {
-    return RMapReader.getVersion(uri);
-  }
-
-  public URI getURI() {
-    return uri;
-  }
-
-  public JSONObject getEncodedRMap() {
-    return rmap;
-  }
-
-  public String getSignature() {
-    return signature;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
deleted file mode 100644
index dc81d34..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
+++ /dev/null
@@ -1,205 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.commons.codec.binary.Hex;
-//import org.apache.hadoop.hbase.thrift.generated.Hbase;
-import org.apache.http.NameValuePair;
-import org.apache.http.client.utils.URLEncodedUtils;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.List;
-
-public abstract class RMapReader {
-  /** Max file size of a single file containing an RMap */
-  public static long MAX_SIZE_BYTES = 16 * 1024 * 1024; // 16 MB
-
-  /** RMap version special values */
-  public static long NEXT = -2;
-  public static long CURRENT = -1;
-  public static long UNKNOWN = 0;
-
-  /**
-   * Return a naturally sorted list of available versions of a given RMap URI.
-   *
-   * @param uri URI of the RMap
-   * @return a naturally sorted list of versions of the given RMap URI
-   * @throws IOException if an exception occurs while reading versions
-   */
-  public abstract List<Long> getVersions(final URI uri) throws IOException;
-
-  /**
-   * Resolve a URI containing a symbolic version into a URI with an absolute
-   * value which can be requested from the reader.
-   *
-   * @param uri URI containing a symbolic version
-   * @return a URI containing an absolute version
-   * @throws URISyntaxException if the given URI is malformed
-   */
-  public abstract URI resolveSymbolicVersion(final URI uri)
-          throws URISyntaxException;
-
-  /**
-   * Return the contents of the RMap at the given URI as a string.
-   *
-   * @param uri URI of the RMap
-   * @return contents of the RMap as String
-   * @throws IOException if an exception occurs while reading the RMap
-   */
-  public abstract String readRMapAsString(final URI uri) throws IOException;
-
-  /**
-   * Return the version number of the RMap specified in the given URI.
-   *
-   * @param uri URI of the RMap
-   * @return the version number of the RMap or 0 if no version was found
-   */
-  public static long getVersion(final URI uri) {
-    for (NameValuePair param : URLEncodedUtils.parse(uri, "UTF-8")) {
-      if (param.getName().equals("version")) {
-        switch (param.getValue().toUpperCase()) {
-          case "NEXT":
-            return NEXT;
-          case "CURRENT":
-            return CURRENT;
-          default:
-            try {
-              return Long.parseLong(param.getValue());
-            } catch (NumberFormatException e) {
-              /* Ignore if NaN */
-            }
-        }
-      }
-    }
-    return UNKNOWN;
-  }
-
-  public static boolean isSymbolicVersion(final URI uri) {
-    return getVersion(uri) < 0;
-  }
-
-  /**
-   * Read and return a {@link RMapJSON} of the RMap at the given URI.
-   *
-   * @param uri URI of the RMap
-   * @return a JSON representation of the RMap
-   * @throws IOException if a (possibly transient) exception occurs while
-   *        reading the RMap
-   * @throws RMapException if any other exception occurs while reading the RMap
-   */
-  public RMapJSON readRMap(final URI uri) throws IOException, RMapException {
-    URI nonSymbolicURI;
-    try {
-      nonSymbolicURI = resolveSymbolicVersion(uri);
-      String encodedRMap = readRMapAsString(nonSymbolicURI);
-      return new RMapJSON(nonSymbolicURI, new JSONObject(encodedRMap),
-              getSignature(encodedRMap));
-    } catch (URISyntaxException e) {
-      throw new RMapException("URI syntax invalid for RMap: " + uri, e);
-    } catch (JSONException e) {
-      throw new RMapException(
-              "Failed to decode JSON string for RMap: " + uri, e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new RMapException(
-              "Failed to generate signature for RMap: " + uri, e);
-    }
-  }
-
-  /**
-   * Get an MD5 hash of the given string.
-   *
-   * @param s string to be hashed
-   * @return a hex String representation of the hash
-   * @throws NoSuchAlgorithmException if MD5 message digest is unavailable
-   */
-  public static String getSignature(final String s)
-          throws NoSuchAlgorithmException {
-    return new String(Hex.encodeHex(
-            MessageDigest.getInstance("MD5").digest(s.getBytes())));
-  }
-
-  /**
-   * Get an MD5 hash of the RMap at the given URI.
-   *
-   * @param uri URI of the RMap
-   * @return a hex String representation of the hash
-   * @throws IOException if an exception occurs while reading the RMap
-   * @throws RMapException if the URI is malformed or the signature could not
-   *        be generated
-   */
-  public String getSignature(final URI uri) throws IOException, RMapException {
-    URI nonSymbolicURI;
-    try {
-      nonSymbolicURI = resolveSymbolicVersion(uri);
-      String encodedRMap = readRMapAsString(nonSymbolicURI);
-      return getSignature(encodedRMap);
-    } catch (URISyntaxException e) {
-      throw new RMapException("URI syntax invalid for RMap: " + uri, e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new RMapException(
-              "Failed to generate signature for RMap: " + uri, e);
-    }
-  }
-
-  /**
-   * Get the scheme, authority (if present) and path of a given URI as a string.
-   * @param uri URI to strip of its query and fragment components
-   * @return a string containing just the scheme, authority and path
-   */
-  public static String getSchemeAndPath(final URI uri) {
-    return String.format("%s:%s%s", uri.getScheme(),
-            uri.getAuthority() != null ?
-                    String.format("//%s", uri.getAuthority()) : "",
-            uri.getPath());
-  }
-
-  /**
-   * Get a versioned URI for the RMap with the given scheme, path and version.
-   * @param schemeAndPath scheme, authority (if any) and path of the RMap
-   * @param version version to request; may be a symbolic version (CURRENT or NEXT)
-   * @return a URI of the form [schemeAndPath]?version=[version]
-   * @throws URISyntaxException if the resulting URI is malformed
-   */
-  public static URI getVersionedURI(final String schemeAndPath,
-          final long version) throws URISyntaxException {
-    String token = "UNKNOWN";
-
-    if (version > 0) {
-      token = String.format("%d", version);
-    } else if (version == CURRENT) {
-      token = "CURRENT";
-    } else if (version == NEXT) {
-      token = "NEXT";
-    }
-
-    return new URI(String.format("%s?version=%s", schemeAndPath, token));
-  }
-
-  /**
-   * Get a versioned URI for the RMap with given base URI and version. If the
-   * given URI already contains a version it is overwritten by the given
-   * version.
-   * @param uri base URI of the RMap
-   * @param version version to request; may be a symbolic version (CURRENT or NEXT)
-   * @return a URI of the form [scheme]:[//authority][path]?version=[version]
-   * @throws URISyntaxException if the resulting URI is malformed
-   */
-  public static URI getVersionedURI(final URI uri, final long version)
-          throws URISyntaxException {
-    return getVersionedURI(getSchemeAndPath(uri), version);
-  }
-
-  public long getCurrentVersion(final String schemeAndPath)
-          throws URISyntaxException {
-    return getVersion(resolveSymbolicVersion(
-            getVersionedURI(schemeAndPath, CURRENT)));
-  }
-
-  public long getNextVersion(final String schemeAndPath)
-          throws URISyntaxException {
-    return getVersion(resolveSymbolicVersion(
-            getVersionedURI(schemeAndPath, NEXT)));
-  }
-}
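
For context, RMapReader's static helpers are what turn a base RMap location into the versioned form consumed by the readers. A small sketch of the round trip (the class name and HDFS location are hypothetical):

    import java.net.URI;
    import org.apache.hadoop.hbase.consensus.rmap.RMapReader;

    public class VersionedUriSketch {
      public static void main(String[] args) throws Exception {
        URI base = new URI("hdfs://nn1:8020/hbase/rmap.json");
        // Pin an explicit version.
        URI pinned = RMapReader.getVersionedURI(base, 42);
        System.out.println(pinned);                          // hdfs://nn1:8020/hbase/rmap.json?version=42
        System.out.println(RMapReader.getVersion(pinned));   // 42
        // Symbolic versions (CURRENT, NEXT) must be resolved by a reader before use.
        URI current = RMapReader.getVersionedURI(base, RMapReader.CURRENT);
        System.out.println(RMapReader.isSymbolicVersion(current));  // true
      }
    }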

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java
deleted file mode 100644
index 6dfaa57..0000000
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java
+++ /dev/null
@@ -1,142 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-
-public class RegionLocator {
-  private static final Logger LOG = LoggerFactory.getLogger(
-          RegionLocator.class);
-
-  private Configuration conf;
-
-  // regionInfoMap is a mapping from table name to region start key to
-  // HRegionInfo. This will be used in locateRegion and in turn in
-  // HConnection.locateRegion, so it needs to be thread-safe as the same
-  // HConnection can be used from multiple threads at the same time
-  ConcurrentHashMap<String, ConcurrentSkipListMap<byte[], HRegionInfo>>
-          regionInfoMap = new ConcurrentHashMap<>();
-
-  public RegionLocator(final Configuration conf) {
-    this.conf = conf;
-  }
-
-  public HRegionInfo findRegion(byte[] tableName, byte[] row) {
-    ConcurrentSkipListMap<byte[], HRegionInfo> regions =
-            regionInfoMap.get(Bytes.toString(tableName));
-    if (regions != null) {
-      Map.Entry<byte[], HRegionInfo> entry = regions.floorEntry(row);
-      if (entry != null) {
-        return entry.getValue();
-      }
-    }
-    return null;
-  }
-
-  public List<HTableDescriptor> getAllTables() {
-    List<HTableDescriptor> tables = new ArrayList<>(regionInfoMap.size());
-    for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable :
-            regionInfoMap.values()) {
-      if (regionMapForTable.size() > 0) {
-        tables.add(regionMapForTable.firstEntry().getValue().getTableDesc());
-      }
-    }
-    return tables;
-  }
-
-  public List<List<HRegionInfo>> getAllRegionsGroupByTable() {
-    List<List<HRegionInfo>> regions = new ArrayList<>(regionInfoMap.size());
-    for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable :
-            regionInfoMap.values()) {
-      regions.add(new ArrayList<>(regionMapForTable.values()));
-    }
-    return regions;
-  }
-
-  /**
-   * Get all servers found in the regionInfo map. This method iterates over all
-   * HRegionInfo entries and thus might be expensive.
-   *
-   * @return a set containing all servers found in the region map
-   */
-  public Set<HServerAddress> getAllServers() {
-    Set<HServerAddress> servers = new HashSet<>();
-    for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable :
-            regionInfoMap.values()) {
-      for (HRegionInfo region : regionMapForTable.values()) {
-        for (HServerAddress server : region.getPeersWithRank().keySet()) {
-          servers.add(server);
-        }
-      }
-    }
-    return servers;
-  }
-
-  public List<HRegionInfo> getRegionsForTable(byte[] tableName) {
-    ConcurrentSkipListMap<byte[], HRegionInfo> regions =
-            regionInfoMap.get(Bytes.toString(tableName));
-    if (regions != null) {
-      return new ArrayList<>(regions.values());
-    } else {
-      return null;
-    }
-  }
-
-  public List<HRegionInfo> getRegionsForServer(final HServerAddress address) {
-    List<HRegionInfo> regions = new ArrayList<>();
-    for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable :
-            regionInfoMap.values()) {
-      for (HRegionInfo region : regionMapForTable.values()) {
-        if (region.getPeersWithRank().containsKey(address)) {
-          regions.add(region);
-        }
-      }
-    }
-    return regions;
-  }
-
-  private void updateRegionInfoMap(final List<HRegionInfo> regions) {
-    for (HRegionInfo region : regions) {
-      String tableName = region.getTableDesc().getNameAsString();
-      ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable
-              = regionInfoMap.get(tableName);
-      if (regionMapForTable == null) {
-        regionMapForTable = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
-        regionInfoMap.put(tableName, regionMapForTable);
-      }
-      regionMapForTable.put(region.getStartKey(), region);
-    }
-  }
-
-  public void refresh() throws IOException, RMapException {
-    Parser parser = new Parser(conf);
-
-    URI uri = RMapConfiguration.getRMapSubscription(conf);
-    if (uri != null) {
-      RMapReader reader = RMapConfiguration.getRMapReader(conf, uri);
-
-      try {
-        JSONObject encodedRMap = reader.readRMap(uri).getEncodedRMap();
-        updateRegionInfoMap(parser.parseEncodedRMap(encodedRMap));
-      } catch (JSONException e) {
-        throw new RMapException("Failed to decode JSON for RMap: " + uri, e);
-      }
-    }
-  }
-
-  public boolean isEmpty() {
-    return regionInfoMap.isEmpty();
-  }
-}
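
For context, RegionLocator.findRegion relies on ConcurrentSkipListMap.floorEntry over region start keys: the region holding a row is the one with the greatest start key less than or equal to that row. A stripped-down illustration of that lookup (the class name, table layout and row keys are made up):

    import java.util.concurrent.ConcurrentSkipListMap;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FloorEntrySketch {
      public static void main(String[] args) {
        // Start-key -> region-name map, mirroring RegionLocator's per-table index.
        ConcurrentSkipListMap<byte[], String> regions =
            new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
        regions.put(Bytes.toBytes(""), "region-A");    // [ "", "m" )
        regions.put(Bytes.toBytes("m"), "region-B");   // [ "m", +inf )
        // floorEntry returns the entry with the greatest start key <= row.
        System.out.println(regions.floorEntry(Bytes.toBytes("karl")).getValue()); // region-A
        System.out.println(regions.floorEntry(Bytes.toBytes("zoe")).getValue());  // region-B
      }
    }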

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c85d5e9/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java
index 3de7ab8..777d917 100644
--- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java
@@ -19,7 +19,6 @@ import org.apache.commons.cli.Options;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
 import org.apache.hadoop.hbase.consensus.quorum.AggregateTimer;
@@ -321,12 +320,12 @@ public class LocalConsensusServer {
     }
 
     // Set the region with the peers
-    HRegionInfo regionInfo = RaftUtil.createDummyRegionInfo(regionId, peers);
+    QuorumInfo quorumInfo = RaftUtil.createDummyQuorumInfo(regionId, peers);
 
     // Create the RaftQuorumContext
-    RaftQuorumContext context = new RaftQuorumContext(regionInfo.getQuorumInfo(),
+    RaftQuorumContext context = new RaftQuorumContext(quorumInfo,
       configuration, localHost,
-      (regionInfo.getTableDesc().getNameAsString() + "."),
+      (regionId + "."),
       consensusServer.aggregateTimer,
       consensusServer.serialExecutorService,
       consensusServer.execServiceForThriftClients

