hbase-commits mailing list archives

From: ecl...@apache.org
Subject: [19/19] hbase git commit: HBASE-12476 HydraBase consensus protocol
Date: Tue, 25 Nov 2014 20:29:12 GMT
HBASE-12476 HydraBase consensus protocol

Signed-off-by: Elliott Clark <elliott@fb.com>
Signed-off-by: Elliott Clark <eclark@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eca32aa4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eca32aa4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eca32aa4

Branch: refs/heads/HBASE-12259
Commit: eca32aa4ac4538b0334ea83d3b3bc57ec29e7f65
Parents: e6b4300
Author: Gaurav Menghani <gauravm@fb.com>
Authored: Fri Nov 21 11:26:55 2014 -0800
Committer: Elliott Clark <eclark@apache.org>
Committed: Tue Nov 25 12:17:42 2014 -0800

----------------------------------------------------------------------
 hbase-consensus/pom.xml                         |  349 +++
 .../apache/hadoop/hbase/HBaseConfiguration.java |  145 ++
 .../apache/hadoop/hbase/HColumnDescriptor.java  | 1001 ++++++++
 .../org/apache/hadoop/hbase/HConstants.java     | 1296 ++++++++++
 .../org/apache/hadoop/hbase/HRegionInfo.java    |  838 +++++++
 .../org/apache/hadoop/hbase/HServerAddress.java |  243 ++
 .../apache/hadoop/hbase/HTableDescriptor.java   |  792 ++++++
 .../java/org/apache/hadoop/hbase/KeyValue.java  | 2300 ++++++++++++++++++
 .../apache/hadoop/hbase/RegionException.java    |   56 +
 .../client/NoLeaderForRegionException.java      |   22 +
 .../hbase/conf/ConfigurationObserver.java       |   36 +
 .../hbase/consensus/client/FetchTask.java       |   65 +
 .../hbase/consensus/client/QuorumClient.java    |  285 +++
 .../consensus/client/QuorumLoadTestClient.java  |  161 ++
 .../client/QuorumThriftClientAgent.java         |  342 +++
 .../consensus/client/QuorumThriftClientCLI.java |   72 +
 .../CommitQueueOverloadedException.java         |    9 +
 .../exceptions/LeaderNotReadyException.java     |    9 +
 .../exceptions/NewLeaderException.java          |   20 +
 .../exceptions/NotEnoughMemoryException.java    |   16 +
 .../hadoop/hbase/consensus/fsm/Conditional.java |    9 +
 .../consensus/fsm/ConstitutentFSMService.java   |  178 ++
 .../hadoop/hbase/consensus/fsm/Event.java       |   52 +
 .../hadoop/hbase/consensus/fsm/EventType.java   |    3 +
 .../fsm/FSMLargeOpsExecutorService.java         |   91 +
 .../hadoop/hbase/consensus/fsm/FSMMetrics.java  |   24 +
 .../hbase/consensus/fsm/FiniteStateMachine.java |  200 ++
 .../consensus/fsm/FiniteStateMachineIf.java     |   57 +
 .../fsm/FiniteStateMachineService.java          |   67 +
 .../fsm/FiniteStateMachineServiceImpl.java      |  132 +
 .../hbase/consensus/fsm/MutableContext.java     |    3 +
 .../hadoop/hbase/consensus/fsm/OnEvent.java     |   13 +
 .../hadoop/hbase/consensus/fsm/State.java       |   67 +
 .../hadoop/hbase/consensus/fsm/StateType.java   |    3 +
 .../hadoop/hbase/consensus/fsm/Transition.java  |   33 +
 .../hbase/consensus/fsm/TransitionType.java     |    3 +
 .../hbase/consensus/fsm/Unconditional.java      |    9 +
 .../apache/hadoop/hbase/consensus/fsm/Util.java |   27 +
 .../hbase/consensus/log/CachedFileChannel.java  |  232 ++
 .../consensus/log/CandidateLogsManager.java     | 1249 ++++++++++
 .../log/CommitLogManagerInterface.java          |   69 +
 .../hbase/consensus/log/InMemoryLogManager.java |  242 ++
 .../consensus/log/LogFetchPlanCreator.java      |   13 +
 .../hadoop/hbase/consensus/log/LogFileInfo.java |  138 ++
 .../consensus/log/LogFileInfoIterator.java      |  109 +
 .../hbase/consensus/log/LogFileInterface.java   |   25 +
 .../hbase/consensus/log/LogFileViewer.java      |  108 +
 .../hadoop/hbase/consensus/log/LogReader.java   |  392 +++
 .../hadoop/hbase/consensus/log/LogWriter.java   |  201 ++
 .../hbase/consensus/log/RandomAccessLog.java    |  451 ++++
 .../hadoop/hbase/consensus/log/ReadOnlyLog.java |  185 ++
 .../hbase/consensus/log/RemoteLogFetcher.java   |   96 +
 .../hadoop/hbase/consensus/log/SeedLogFile.java |   52 +
 .../consensus/log/TransactionLogCreator.java    |  151 ++
 .../consensus/log/TransactionLogManager.java    | 1827 ++++++++++++++
 .../consensus/metrics/ConsensusMetrics.java     |  379 +++
 .../hbase/consensus/metrics/PeerMetrics.java    |  113 +
 .../hbase/consensus/protocol/ConsensusHost.java |   55 +
 .../protocol/DataStoreCommitEvent.java          |   37 +
 .../hadoop/hbase/consensus/protocol/EditId.java |  101 +
 .../hbase/consensus/protocol/Payload.java       |   25 +
 .../consensus/quorum/AbstractPeerManager.java   |  134 +
 .../hbase/consensus/quorum/AggregateTimer.java  |   93 +
 .../quorum/AppendConsensusSession.java          |  247 ++
 .../quorum/AppendConsensusSessionInterface.java |   17 +
 .../quorum/AppendResponseCallBack.java          |   26 +
 .../consensus/quorum/ConsensusSession.java      |    9 +
 .../consensus/quorum/ConstituentTimer.java      |  109 +
 .../quorum/HeartbeatTimeoutCallback.java        |   31 +
 .../consensus/quorum/ImmutableRaftContext.java  |  147 ++
 .../quorum/JointAppendConsensusSession.java     |   97 +
 .../quorum/JointConsensusPeerManager.java       |  179 ++
 .../quorum/JointVoteConsensusSession.java       |   56 +
 .../consensus/quorum/MutableRaftContext.java    |  125 +
 .../consensus/quorum/PeerManagerInterface.java  |   39 +
 .../quorum/ProgressTimeoutCallback.java         |   32 +
 .../hbase/consensus/quorum/QuorumAgent.java     |  463 ++++
 .../hbase/consensus/quorum/QuorumInfo.java      |  357 +++
 .../quorum/QuorumMembershipChangeRequest.java   |   38 +
 .../consensus/quorum/RaftQuorumContext.java     | 1415 +++++++++++
 .../hbase/consensus/quorum/RepeatingTimer.java  |   93 +
 .../hbase/consensus/quorum/ReseedRequest.java   |   27 +
 .../hbase/consensus/quorum/SessionResult.java   |    9 +
 .../quorum/SingleConsensusPeerManager.java      |   83 +
 .../consensus/quorum/TimeoutEventHandler.java   |    5 +
 .../hadoop/hbase/consensus/quorum/Timer.java    |   12 +
 .../consensus/quorum/VoteConsensusSession.java  |  101 +
 .../quorum/VoteConsensusSessionInterface.java   |   11 +
 .../hbase/consensus/raft/RaftStateMachine.java  |  231 ++
 .../raft/events/AppendNotCompleted.java         |   32 +
 .../raft/events/AppendRequestEvent.java         |   23 +
 .../raft/events/AppendRequestTimeout.java       |   24 +
 .../raft/events/AppendResponseEvent.java        |   17 +
 .../consensus/raft/events/AppendRetry.java      |   28 +
 .../consensus/raft/events/AppendSucceeded.java  |   28 +
 .../consensus/raft/events/IsCandidate.java      |   23 +
 .../hbase/consensus/raft/events/IsFollower.java |   22 +
 .../hbase/consensus/raft/events/IsLeader.java   |   23 +
 .../events/IsTransactionLogNotAccessible.java   |   19 +
 .../consensus/raft/events/NeedStepDown.java     |   29 +
 .../NoQuorumMembershipChangeInProgress.java     |   23 +
 .../raft/events/ProgressTimeoutEvent.java       |    9 +
 .../events/QuorumMembershipChangeEvent.java     |   18 +
 .../QuorumMembershipChangeInProgress.java       |   23 +
 .../consensus/raft/events/RaftEventType.java    |   19 +
 .../raft/events/ReplicateEntriesEvent.java      |   90 +
 .../raft/events/ReseedRequestEvent.java         |   23 +
 .../hbase/consensus/raft/events/StartEvent.java |    9 +
 .../hbase/consensus/raft/events/VoteFailed.java |   28 +
 .../consensus/raft/events/VoteNotCompleted.java |   31 +
 .../consensus/raft/events/VoteRequestEvent.java |   24 +
 .../raft/events/VoteResponseEvent.java          |   17 +
 .../consensus/raft/events/VoteSucceeded.java    |   29 +
 .../hbase/consensus/raft/states/AckClient.java  |   28 +
 .../consensus/raft/states/BecomeFollower.java   |   27 +
 .../consensus/raft/states/BecomeLeader.java     |   73 +
 .../hbase/consensus/raft/states/Candidate.java  |   19 +
 .../raft/states/ChangeQuorumMembership.java     |  167 ++
 .../hbase/consensus/raft/states/Follower.java   |   19 +
 .../hbase/consensus/raft/states/Halt.java       |   26 +
 .../raft/states/HandleAppendRequest.java        |  402 +++
 .../raft/states/HandleAppendResponse.java       |   64 +
 .../HandleQuorumMembershipChangeRequest.java    |  108 +
 .../raft/states/HandleReseedRequest.java        |   40 +
 .../raft/states/HandleVoteRequest.java          |  105 +
 .../raft/states/HandleVoteResponse.java         |   59 +
 .../hbase/consensus/raft/states/Leader.java     |   19 +
 .../consensus/raft/states/ProgressTimeout.java  |   18 +
 .../consensus/raft/states/RaftAsyncState.java   |   17 +
 .../hbase/consensus/raft/states/RaftState.java  |   29 +
 .../consensus/raft/states/RaftStateType.java    |   35 +
 .../raft/states/ReSendAppendRequest.java        |   31 +
 .../raft/states/SendAppendRequest.java          |   82 +
 .../consensus/raft/states/SendVoteRequest.java  |  101 +
 .../hbase/consensus/raft/states/Start.java      |   18 +
 .../raft/transitions/RaftTransitionType.java    |   44 +
 .../rmap/GetHydraBaseRegionInfoUtil.java        |   67 +
 .../hadoop/hbase/consensus/rmap/HDFSReader.java |  140 ++
 .../hbase/consensus/rmap/LocalReader.java       |   96 +
 .../consensus/rmap/NoSuchRMapException.java     |   10 +
 .../hadoop/hbase/consensus/rmap/Parser.java     |  146 ++
 .../hbase/consensus/rmap/RMapConfiguration.java |  330 +++
 .../hbase/consensus/rmap/RMapException.java     |   11 +
 .../hadoop/hbase/consensus/rmap/RMapJSON.java   |   34 +
 .../hadoop/hbase/consensus/rmap/RMapReader.java |  205 ++
 .../hbase/consensus/rmap/RegionLocator.java     |  142 ++
 .../hbase/consensus/rpc/AppendRequest.java      |  217 ++
 .../hbase/consensus/rpc/AppendResponse.java     |   94 +
 .../hadoop/hbase/consensus/rpc/LogState.java    |  151 ++
 .../hadoop/hbase/consensus/rpc/PeerStatus.java  |  107 +
 .../hadoop/hbase/consensus/rpc/Request.java     |    8 +
 .../hadoop/hbase/consensus/rpc/VoteRequest.java |   94 +
 .../hbase/consensus/rpc/VoteResponse.java       |   61 +
 .../consensus/server/ConsensusService.java      |   70 +
 .../consensus/server/ConsensusServiceImpl.java  |  248 ++
 .../InstrumentedConsensusServiceImpl.java       |  241 ++
 .../consensus/server/LocalConsensusServer.java  |  354 +++
 .../consensus/server/peer/AbstractPeer.java     |  447 ++++
 .../server/peer/PeerConsensusServer.java        |  152 ++
 .../hbase/consensus/server/peer/PeerServer.java |   18 +
 .../server/peer/PeerServerImmutableContext.java |   10 +
 .../server/peer/PeerServerMutableContext.java   |   23 +
 .../consensus/server/peer/PeerStateMachine.java |   84 +
 .../server/peer/ReconnectTimeoutCallback.java   |   21 +
 .../peer/events/PeerAppendRequestEvent.java     |   18 +
 .../peer/events/PeerAppendResponseEvent.java    |   18 +
 .../server/peer/events/PeerServerEventType.java |   16 +
 .../peer/events/PeerVoteRequestEvent.java       |   19 +
 .../peer/events/PeerVoteResponseEvent.java      |   18 +
 .../server/peer/states/PeerFollower.java        |   19 +
 .../peer/states/PeerHandleAppendResponse.java   |  282 +++
 .../server/peer/states/PeerHandleRPCError.java  |   27 +
 .../peer/states/PeerHandleVoteResponse.java     |   19 +
 .../server/peer/states/PeerRecovery.java        |   19 +
 .../peer/states/PeerSendAppendRequest.java      |   50 +
 .../server/peer/states/PeerSendVoteRequest.java |   22 +
 .../peer/states/PeerServerAsyncState.java       |   18 +
 .../server/peer/states/PeerServerState.java     |   23 +
 .../server/peer/states/PeerServerStateType.java |   16 +
 .../consensus/server/peer/states/Start.java     |   18 +
 .../transition/PeerServerTransitionType.java    |   22 +
 .../hadoop/hbase/consensus/util/RaftUtil.java   |  107 +
 .../hadoop/hbase/io/hfile/Compression.java      |  345 +++
 .../hbase/io/hfile/ReusableStreamGzipCodec.java |  133 +
 .../io/hfile/bucket/CacheFullException.java     |   54 +
 .../hbase/ipc/ByteBufferOutputStream.java       |  104 +
 .../thrift/exceptions/ThriftHBaseException.java |  151 ++
 .../hadoop/hbase/metrics/MetricsBase.java       |  160 ++
 .../apache/hadoop/hbase/metrics/TimeStat.java   |   88 +
 .../hbase/regionserver/DataStoreState.java      |  101 +
 .../hbase/regionserver/RaftEventListener.java   |   17 +
 .../regionserver/RegionOverloadedException.java |   47 +
 .../PercentileMetric.java/PercentileMetric.java |  115 +
 .../hbase/regionserver/wal/AbstractWAL.java     |   69 +
 .../hadoop/hbase/regionserver/wal/WALEdit.java  |  521 ++++
 .../hbase/thrift/generated/IllegalArgument.java |  400 +++
 .../org/apache/hadoop/hbase/util/Arena.java     |   12 +
 .../hadoop/hbase/util/BucketAllocator.java      |  455 ++++
 .../hbase/util/BucketAllocatorException.java    |   34 +
 .../org/apache/hadoop/hbase/util/Bytes.java     | 1612 ++++++++++++
 .../apache/hadoop/hbase/util/ConditionUtil.java |   62 +
 .../hadoop/hbase/util/DaemonThreadFactory.java  |   46 +
 .../org/apache/hadoop/hbase/util/HasThread.java |  101 +
 .../org/apache/hadoop/hbase/util/Histogram.java |  390 +++
 .../apache/hadoop/hbase/util/InHeapArena.java   |   79 +
 .../apache/hadoop/hbase/util/InfoServer.java    |  148 ++
 .../hadoop/hbase/util/InjectionEvent.java       |   45 +
 .../hadoop/hbase/util/InjectionHandler.java     |  171 ++
 .../apache/hadoop/hbase/util/MemoryBuffer.java  |   48 +
 .../java/org/apache/hadoop/hbase/util/Pair.java |  131 +
 .../org/apache/hadoop/hbase/util/Threads.java   |  264 ++
 .../serial/AsyncSerialExecutorServiceImpl.java  |  101 +
 .../util/serial/SerialExecutorService.java      |   34 +
 .../hadoop/hbase/consensus/LocalTestBed.java    |  944 +++++++
 .../hbase/consensus/RaftTestDataProvider.java   |  130 +
 .../hadoop/hbase/consensus/RaftTestUtil.java    |  751 ++++++
 .../consensus/ReplicationLoadForUnitTest.java   |   90 +
 .../hbase/consensus/SimulatedNetworkTester.java |   48 +
 .../hadoop/hbase/consensus/TestBasicCommit.java |  121 +
 .../consensus/TestBasicLeaderElection.java      |   74 +
 .../hbase/consensus/TestBasicPeerFailure.java   |  170 ++
 .../hbase/consensus/TestBasicPeerSeeding.java   |  100 +
 .../hbase/consensus/TestBasicPeerSlow.java      |  230 ++
 .../hbase/consensus/TestBasicQuorumCommit.java  |  101 +
 .../TestBasicQuorumMembershipChange.java        |  219 ++
 .../consensus/TestBasicSeedCommitIndex.java     |  115 +
 .../hbase/consensus/TestCommitDeadline.java     |   85 +
 .../hbase/consensus/TestLogFileViewer.java      |   79 +
 .../hbase/consensus/TestLogWriterAndReader.java |  105 +
 .../consensus/TestLowerRankBecomingLeader.java  |  124 +
 .../consensus/TestPersistLastVotedFor.java      |   83 +
 .../hbase/consensus/TestRaftEventListener.java  |  130 +
 .../hbase/consensus/TestRandomAccessLog.java    |   93 +
 .../hadoop/hbase/consensus/TestReadOnlyLog.java |  147 ++
 .../fsm/TestAsyncStatesInRaftStateMachine.java  |  242 ++
 .../consensus/fsm/TestFiniteStateMachine.java   |  194 ++
 .../fsm/TestFiniteStateMachineService.java      |  144 ++
 .../consensus/fsm/TestIncompleteStates.java     |  291 +++
 .../consensus/log/TestCachedFileChannel.java    |   76 +
 .../consensus/log/TestRemoteLogFetcher.java     |  167 ++
 .../log/TestTransactionLogCreator.java          |   40 +
 .../consensus/metrics/TestPeerMetrics.java      |   23 +
 .../consensus/quorum/TestAggregateTimer.java    |  221 ++
 .../consensus/quorum/TestConstituentTimer.java  |  147 ++
 .../consensus/quorum/TestRepeatingTimer.java    |  143 ++
 .../hadoop/hbase/consensus/rmap/TestParser.java |   93 +
 .../consensus/rmap/TestRMapConfiguration.java   |   55 +
 .../hbase/consensus/rmap/TestRMapReader.java    |  102 +
 .../hbase/consensus/rmap/TestRegionLocator.java |  180 ++
 .../hadoop/hbase/consensus/rmap/rmap.json       | 1228 ++++++++++
 pom.xml                                         |   87 +-
 251 files changed, 39327 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
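
For orientation: the raft/ classes listed above implement the consensus roles as a
finite state machine (see fsm/FiniteStateMachine.java), with states such as Follower,
Candidate and Leader and events such as VoteRequestEvent and AppendRequestEvent. Below
is a toy sketch of the standard Raft role transitions those classes model; the names
are illustrative only, not this commit's actual API.

enum Role { FOLLOWER, CANDIDATE, LEADER }

final class RaftRoleSketch {
  Role role = Role.FOLLOWER;    // every peer starts out as a follower

  // No heartbeat from a leader before the progress timeout fires:
  // stand for election (cf. ProgressTimeoutEvent, SendVoteRequest).
  void onProgressTimeout() {
    if (role != Role.LEADER) {
      role = Role.CANDIDATE;    // bump the term and send VoteRequests
    }
  }

  // A majority of peers granted their vote (cf. VoteSucceeded, BecomeLeader).
  void onVoteSucceeded() {
    if (role == Role.CANDIDATE) {
      role = Role.LEADER;       // start sending AppendRequests as heartbeats
    }
  }

  // Seeing a higher term in any request or response demotes the peer
  // (cf. NeedStepDown, BecomeFollower).
  void onHigherTermSeen() {
    role = Role.FOLLOWER;
  }
}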


http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-consensus/pom.xml b/hbase-consensus/pom.xml
new file mode 100644
index 0000000..6a048eb
--- /dev/null
+++ b/hbase-consensus/pom.xml
@@ -0,0 +1,349 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!--
+  /**
+   * Licensed to the Apache Software Foundation (ASF) under one
+   * or more contributor license agreements.  See the NOTICE file
+   * distributed with this work for additional information
+   * regarding copyright ownership.  The ASF licenses this file
+   * to you under the Apache License, Version 2.0 (the
+   * "License"); you may not use this file except in compliance
+   * with the License.  You may obtain a copy of the License at
+   *
+   *     http://www.apache.org/licenses/LICENSE-2.0
+   *
+   * Unless required by applicable law or agreed to in writing, software
+   * distributed under the License is distributed on an "AS IS" BASIS,
+   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   * See the License for the specific language governing permissions and
+   * limitations under the License.
+   */
+  -->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>hbase</artifactId>
+    <groupId>org.apache.hbase</groupId>
+    <version>2.0.0-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+
+  <artifactId>hbase-consensus</artifactId>
+  <name>HBase - Consensus</name>
+  <description>Implementation of the consensus module using Raft</description>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-site-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <!--Make it so assembly:single does nothing in here-->
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>${maven.assembly.version}</version>
+        <configuration>
+          <skipAssembly>true</skipAssembly>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+          <version>2.15</version>
+          <dependencies>
+            <dependency>
+              <groupId>org.apache.maven.surefire</groupId>
+              <artifactId>surefire-junit47</artifactId>
+              <version>2.15</version>
+            </dependency>
+          </dependencies>
+          <configuration>
+            <forkedProcessTimeoutInSeconds>2000</forkedProcessTimeoutInSeconds>
+            <argLine>-enableassertions -Xmx1024m -Djava.library.path=${basedir}/lib/native/Linux-amd64-64/</argLine>
+            <redirectTestOutputToFile>true</redirectTestOutputToFile>
+            <systemPropertyVariables>
+              <jacoco-agent.destfile>target/coverage-reports/jacoco-unit.exec</jacoco-agent.destfile>
+            </systemPropertyVariables>
+          </configuration>
+        <!-- Always skip the second part executions, since we only run
+          simple unit tests in this module -->
+        <executions>
+          <execution>
+            <id>secondPartTestsExecution</id>
+            <phase>test</phase>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- Make a jar and put the sources in the jar -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+      </plugin>
+    </plugins>
+
+
+    <resources>
+      <resource>
+        <directory>src/main/resources/</directory>
+        <includes>
+          <include>hbase-default.xml</include>
+        </includes>
+      </resource>
+    </resources>
+  </build>
+
+  <properties>
+    <guava.version>12.0.1</guava.version>
+    <compileSource>1.7</compileSource>
+    <airlift.version>0.91</airlift.version>
+    <jmxutils.version>1.16</jmxutils.version>
+    <swift.version>0.13.1</swift.version>
+    <disruptor.version>3.3.0</disruptor.version>
+  </properties>
+
+  <dependencyManagement>
+    <dependencies>
+      <!--
+      <dependency>
+              <groupId>com.facebook.swift</groupId>
+              <artifactId>swift-root</artifactId>
+              <version>${swift.version}</version>
+              <type>pom</type>
+      </dependency>
+      -->
+
+    </dependencies>
+  </dependencyManagement>
+
+  <dependencies>
+    <!-- Intra-project dependencies -->
+      <dependency>
+          <groupId>org.ow2.asm</groupId>
+          <artifactId>asm-all</artifactId>
+          <version>4.1</version>
+      </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+    <!-- General dependencies -->
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>2.2.4</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-all</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.airlift</groupId>
+      <artifactId>configuration</artifactId>
+      <version>${airlift.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.airlift</groupId>
+      <artifactId>units</artifactId>
+      <version>${airlift.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.airlift</groupId>
+      <artifactId>stats</artifactId>
+      <version>${airlift.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.weakref</groupId>
+      <artifactId>jmxutils</artifactId>
+      <version>${jmxutils.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.facebook.swift</groupId>
+      <artifactId>swift-codec</artifactId>
+      <version>${swift.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.facebook.swift</groupId>
+      <artifactId>swift-service</artifactId>
+      <version>${swift.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.json</groupId>
+      <artifactId>json</artifactId>
+      <version>20090211</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <version>3.1</version>
+    </dependency>
+    <dependency>
+      <groupId>com.lmax</groupId>
+      <artifactId>disruptor</artifactId>
+      <version>${disruptor.version}</version>
+    </dependency>
+  </dependencies>
+
+  <profiles>
+    <!-- Skip the tests in this module -->
+    <profile>
+      <id>skipClientTests</id>
+      <activation>
+        <property>
+          <name>skipClientTests</name>
+        </property>
+      </activation>
+      <properties>
+        <surefire.skipFirstPart>true</surefire.skipFirstPart>
+      </properties>
+    </profile>
+    <!-- profile against Hadoop 1.1.x. It has to have the same
+ activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
+ the same time. -->
+    <profile>
+      <id>hadoop-1.1</id>
+      <activation>
+        <property>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>hadoop.profile</name><value>1.1</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+
+    <!--
+      profile for building against Hadoop 2.x. This is the default profile,
+      active whenever the hadoop.profile property is not set.
+    -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>javax.servlet.jsp</groupId>
+              <artifactId>jsp-api</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-server</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>javax.servlet</groupId>
+              <artifactId>servlet-api</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>tomcat</groupId>
+              <artifactId>jasper-compiler</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>tomcat</groupId>
+              <artifactId>jasper-runtime</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-auth</artifactId>
+        </dependency>
+
+        <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-annotations</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
+</project>

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
new file mode 100644
index 0000000..00145f1
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
@@ -0,0 +1,145 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Adds HBase configuration files to a Configuration
+ */
+public class HBaseConfiguration extends Configuration {
+
+  private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class);
+
+  /**
+   * Instantiating HBaseConfiguration() is deprecated. Please use
+   * HBaseConfiguration#create() to construct a plain Configuration
+   */
+  @Deprecated
+  public HBaseConfiguration() {
+    //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration
+    super();
+    addHbaseResources(this);
+    LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use" +
+    		" HBaseConfiguration#create() to construct a plain Configuration");
+  }
+
+  /**
+   * Instantiating HBaseConfiguration() is deprecated. Please use
+   * HBaseConfiguration#create(conf) to construct a plain Configuration
+   */
+  @Deprecated
+  public HBaseConfiguration(final Configuration c) {
+    //TODO:replace with private constructor
+    this();
+    for (Entry<String, String>e: c) {
+      set(e.getKey(), e.getValue());
+    }
+  }
+
+  public static Configuration addHbaseResources(Configuration conf) {
+    // the sequence matters
+    conf.addResource("hbase-default.xml");
+    conf.addResource("hbase-site.xml");
+    conf.addResource("hbase-site-custom.xml");
+    conf.addResource("hbase-compactions.xml");
+    return conf;
+  }
+
+  /**
+   * Creates a Configuration with HBase resources
+   * @return a Configuration with HBase resources
+   */
+  public static Configuration create() {
+    Configuration conf = new Configuration();
+    return addHbaseResources(conf);
+  }
+  /**
+   * Creates a clone of passed configuration.
+   * @param that Configuration to clone.
+   * @return a Configuration created with the hbase-*.xml files plus
+   * the given configuration.
+   */
+  public static Configuration create(final Configuration that) {
+    Configuration conf = create();
+    for (Entry<String, String>e: that) {
+      conf.set(e.getKey(), e.getValue());
+    }
+    return conf;
+  }
+
+  /**
+   * Returns the hash code value for this HBaseConfiguration. The hash code of a
+   * HBaseConfiguration is defined by the xor of the hash codes of its entries.
+   *
+   * @see Configuration#iterator() How the entries are obtained.
+   */
+  @Override
+  @Deprecated
+  public int hashCode() {
+    return hashCode(this);
+  }
+
+  /**
+   * Returns the hash code value for this HBaseConfiguration. The hash code of a
+   * Configuration is defined by the xor of the hash codes of its entries.
+   *
+   * @see Configuration#iterator() How the entries are obtained.
+   */
+  public static int hashCode(Configuration conf) {
+    int hash = 0;
+
+    Iterator<Entry<String, String>> propertyIterator = conf.iterator();
+    while (propertyIterator.hasNext()) {
+      hash ^= propertyIterator.next().hashCode();
+    }
+    return hash;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (!(obj instanceof HBaseConfiguration))
+      return false;
+    HBaseConfiguration otherConf = (HBaseConfiguration) obj;
+    if (size() != otherConf.size()) {
+      return false;
+    }
+    Iterator<Entry<String, String>> propertyIterator = this.iterator();
+    while (propertyIterator.hasNext()) {
+      Entry<String, String> entry = propertyIterator.next();
+      String key = entry.getKey();
+      String value = entry.getValue();
+      if (!value.equals(otherConf.getRaw(key))) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+}
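
Taken together, addHbaseResources() and the create() factories above define the
resource-loading order for a client Configuration. A minimal usage sketch (not part
of this commit; the property key is only illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseConfigurationExample {
  public static void main(String[] args) {
    // create() layers hbase-default.xml, hbase-site.xml,
    // hbase-site-custom.xml and hbase-compactions.xml, in that order;
    // later resources override earlier ones.
    Configuration conf = HBaseConfiguration.create();

    // create(that) builds the layered configuration first, then overlays
    // every entry of the passed-in Configuration on top of it.
    Configuration merged = HBaseConfiguration.create(conf);

    // "hbase.zookeeper.quorum" is just an illustrative key here.
    System.out.println(merged.get("hbase.zookeeper.quorum", "<unset>"));
  }
}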

http://git-wip-us.apache.org/repos/asf/hbase/blob/eca32aa4/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
new file mode 100644
index 0000000..0dbe7fd
--- /dev/null
+++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -0,0 +1,1001 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+//import org.apache.hadoop.hbase.regionserver.StoreFile;
+//import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * An HColumnDescriptor contains information about a column family such as the
+ * number of versions, compression settings, etc.
+ *
+ * It is used as input when creating a table or adding a column. Once set, the
+ * parameters that specify a column cannot be changed without deleting the
+ * column and recreating it. If there is data stored in the column, it will be
+ * deleted when the column is deleted.
+ */
+public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
+  static final Log LOG = LogFactory.getLog(HColumnDescriptor.class);
+  // For future backward compatibility
+
+  // Version 3 was when column names become byte arrays and when we picked up
+  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
+  // Version 5 was when bloom filter descriptors were removed.
+  // Version 6 adds metadata as a map where keys and values are byte[].
+  // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
+  // Version 8 -- reintroduction of bloom filters, changed from boolean to enum
+  // Version 9 -- add data block encoding
+  // Version 10 -- add flash back time
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 10;
+
+  // These constants are used as FileInfo keys
+  public static final String COMPRESSION = "COMPRESSION";
+  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
+  public static final String ENCODE_ON_DISK =
+      "ENCODE_ON_DISK";
+  public static final String DATA_BLOCK_ENCODING =
+      "DATA_BLOCK_ENCODING";
+  public static final String BLOCKCACHE = "BLOCKCACHE";
+
+  /**
+   * The type of compression.
+   * @see org.apache.hadoop.io.SequenceFile.Writer
+   * @deprecated Compression now means which compression library
+   * rather than 'what' to compress.
+   */
+  @Deprecated
+  public static enum CompressionType {
+    /** Do not compress records. */
+    NONE,
+    /** Compress values only, each separately. */
+    RECORD,
+    /** Compress sequences of records together in blocks. */
+    BLOCK
+  }
+
+  public static final String BLOCKSIZE = "BLOCKSIZE";
+  public static final String LENGTH = "LENGTH";
+  public static final String TTL = "TTL";
+  // The amount of time in seconds in the past up to which we support FlashBack
+  // queries. Ex. 60 * 60 * 24 indicates we support FlashBack queries up to 1 day
+  // ago.
+  public static final String FLASHBACK_QUERY_LIMIT = "FLASHBACK_QUERY_LIMIT";
+  public static final String BLOOMFILTER = "BLOOMFILTER";
+  public static final String BLOOMFILTER_ERRORRATE = "BLOOMFILTER_ERRORRATE";
+  public static final String FOREVER = "FOREVER";
+  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
+  public static final String ROWKEY_PREFIX_LENGTH_FOR_BLOOMFILTER = "ROWKEY_PREFIX_LENGTH";
+  public static final String HFILEHISTOGRAM_BUCKET_COUNT =
+      "HFILEHISTOGRAM_BUCKET_COUNT";
+
+  /**
+   * Default compression type.
+   */
+  public static final String DEFAULT_COMPRESSION =
+    Compression.Algorithm.NONE.getName();
+
+  /**
+   * Default value of the flag that enables data block encoding on disk, as
+   * opposed to encoding in cache only. We encode blocks everywhere by default,
+   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
+   */
+  public static final boolean DEFAULT_ENCODE_ON_DISK = true;
+
+  /** Default data block encoding algorithm. */
+  public static final String DEFAULT_DATA_BLOCK_ENCODING =
+      DataBlockEncoding.NONE.toString();
+
+  /**
+   * Default number of versions of a record to keep.
+   */
+  public static final int DEFAULT_VERSIONS = 3;
+
+  /**
+   * Default setting for whether to serve from memory or not.
+   */
+  public static final boolean DEFAULT_IN_MEMORY = false;
+
+  /**
+   * Default setting for whether to use a block cache or not.
+   */
+  public static final boolean DEFAULT_BLOCKCACHE = true;
+
+  /**
+   * Default size of blocks in files stored to the filesystem.  Use smaller blocks
+   * for faster random access at the expense of larger indices (more memory consumption).
+   */
+  public static final int DEFAULT_BLOCKSIZE = 1024; //HFile.DEFAULT_BLOCKSIZE;
+
+//  /**
+//   * Default setting for whether or not to use bloomfilters.
+//   */
+  public static final String DEFAULT_BLOOMFILTER = " "; // StoreFile.BloomType.NONE.toString();
+
+  /**
+   * Default setting for the RowKey Prefix Length for the Bloomfilter.
+   */
+  public static final int DEFAULT_ROWKEY_PREFIX_LENGTH_FOR_BLOOM = -1;
+
+  /**
+   * Default value for bloom filter error rate.
+   */
+  public static final float DEFAULT_BLOOMFILTER_ERROR_RATE = 0.01f;
+
+
+  /**
+   * Default time to live of cell contents.
+   */
+  public static final int DEFAULT_TTL = HConstants.FOREVER;
+
+  /**
+   * Default flash back time. Flash back time is the number of seconds in the
+   * past up to which we support flash back queries.
+   */
+  public static final int DEFAULT_FLASHBACK_QUERY_LIMIT = 0;
+
+  /**
+   * Default scope.
+   */
+  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
+
+  private final static Map<String, String> DEFAULT_VALUES
+    = new HashMap<String, String>();
+  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
+    = new HashSet<ImmutableBytesWritable>();
+  static {
+      DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
+      DEFAULT_VALUES.put(BLOOMFILTER_ERRORRATE,
+          String.valueOf(DEFAULT_BLOOMFILTER_ERROR_RATE));
+      DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
+      DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
+      DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
+      DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
+      DEFAULT_VALUES.put(FLASHBACK_QUERY_LIMIT,
+        String.valueOf(DEFAULT_FLASHBACK_QUERY_LIMIT));
+      DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
+      DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
+      DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
+      DEFAULT_VALUES.put(ENCODE_ON_DISK,
+          String.valueOf(DEFAULT_ENCODE_ON_DISK));
+      DEFAULT_VALUES.put(DATA_BLOCK_ENCODING,
+          String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
+      DEFAULT_VALUES.put(ROWKEY_PREFIX_LENGTH_FOR_BLOOMFILTER,
+          String.valueOf(DEFAULT_ROWKEY_PREFIX_LENGTH_FOR_BLOOM));
+//      DEFAULT_VALUES.put(HFILEHISTOGRAM_BUCKET_COUNT,
+//          String.valueOf(HFileHistogram.DEFAULT_HFILEHISTOGRAM_BINCOUNT));
+      for (String s : DEFAULT_VALUES.keySet()) {
+        RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
+      }
+  }
+
+  /*
+   * Cache the HCD blocksize value here.
+   * Question: is it OK to cache, since on re-enable we create a new HCD?
+   */
+  private volatile Integer blocksize = null;
+
+  // Column family name
+  private byte [] name;
+
+  // Column metadata
+  protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
+
+  /*
+   * Cache the max versions rather than calculate it every time.
+   */
+  private int cachedMaxVersions = -1;
+
+  /**
+   * Default constructor. Must be present for Writable.
+   */
+  public HColumnDescriptor() {
+    this.name = null;
+  }
+
+  /**
+   * Construct a column descriptor specifying only the family name
+   * The other attributes are defaulted.
+   *
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and may not contain a <code>:</code>
+   */
+  public HColumnDescriptor(final String familyName) {
+    this(Bytes.toBytes(familyName));
+  }
+
+  /**
+   * Construct a column descriptor specifying only the family name
+   * The other attributes are defaulted.
+   *
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and may not contain a <code>:</code>
+   */
+  public HColumnDescriptor(final byte [] familyName) {
+    this (familyName == null || familyName.length <= 0?
+      HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
+      DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
+      DEFAULT_TTL, DEFAULT_BLOOMFILTER);
+  }
+
+  /**
+   * Constructor.
+   * Makes a deep copy of the supplied descriptor.
+   * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
+   * @param desc The descriptor.
+   */
+  public HColumnDescriptor(HColumnDescriptor desc) {
+    super();
+    this.name = desc.name.clone();
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        desc.values.entrySet()) {
+      this.values.put(e.getKey(), e.getValue());
+    }
+  }
+
+  /**
+   * Constructor
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and may not contain a <code>:</code>
+   * @param maxVersions Maximum number of versions to keep
+   * @param compression Compression type
+   * @param inMemory If true, column data should be kept in an HRegionServer's
+   * cache
+   * @param blockCacheEnabled If true, MapFile blocks should be cached
+   * @param timeToLive Time-to-live of cell contents, in seconds
+   * (use HConstants.FOREVER for unlimited TTL)
+   * @param bloomFilter Bloom filter type for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
+   * a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is &lt;= 0
+   * @deprecated use {@link #HColumnDescriptor(String)} and setters
+   */
+  @Deprecated
+  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
+      final String compression, final boolean inMemory,
+      final boolean blockCacheEnabled,
+      final int timeToLive, final String bloomFilter) {
+    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
+      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
+  }
+
+  /**
+   * Constructor
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and may not contain a <code>:</code>
+   * @param maxVersions Maximum number of versions to keep
+   * @param compression Compression type
+   * @param inMemory If true, column data should be kept in an HRegionServer's
+   * cache
+   * @param blockCacheEnabled If true, MapFile blocks should be cached
+   * @param blocksize Block size to use when writing out storefiles.  Use
+   * smaller block sizes for faster random access at the expense of larger indices
+   * (more memory consumption).  Default is usually 64k.
+   * @param timeToLive Time-to-live of cell contents, in seconds
+   * (use HConstants.FOREVER for unlimited TTL)
+   * @param bloomFilter Bloom filter type for this column
+   * @param scope The scope tag for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
+   * a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is &lt;= 0
+   * @deprecated use {@link #HColumnDescriptor(String)} and setters
+   */
+  @Deprecated
+  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
+      final String compression, final boolean inMemory,
+      final boolean blockCacheEnabled, final int blocksize,
+      final int timeToLive, final String bloomFilter, final int scope) {
+    this(familyName, maxVersions,
+        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
+        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
+        scope, DEFAULT_BLOOMFILTER_ERROR_RATE);
+  }
+
+  /**
+   * Constructor
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and may not contain a <code>:</code>
+   * @param maxVersions Maximum number of versions to keep
+   * @param compression Compression type
+   * @param encodeOnDisk whether to use the specified data block encoding
+   *        on disk. If false, the encoding will be used in cache only.
+   * @param dataBlockEncoding data block encoding
+   * @param inMemory If true, column data should be kept in an HRegionServer's
+   * cache
+   * @param blockCacheEnabled If true, MapFile blocks should be cached
+   * @param blocksize
+   * @param timeToLive Time-to-live of cell contents, in seconds
+   * (use HConstants.FOREVER for unlimited TTL)
+   * @param bloomFilter Bloom filter type for this column
+   * @param scope The scope tag for this column
+   * @param bloomErrorRate Bloom filter error rate for this column
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
+   * a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is &lt;= 0
+   * @deprecated use {@link #HColumnDescriptor(String)} and setters
+   */
+  @Deprecated
+  public HColumnDescriptor(final byte[] familyName,
+      final int maxVersions,
+      final String compression, final boolean encodeOnDisk,
+      final String dataBlockEncoding, final boolean inMemory,
+      final boolean blockCacheEnabled, final int blocksize,
+      final int timeToLive, final String bloomFilter, final int scope,
+      float bloomErrorRate) {
+    isLegalFamilyName(familyName);
+    this.name = familyName;
+
+    if (maxVersions <= 0) {
+      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
+      // Until there is support, consider 0 or < 0 -- a configuration error.
+      throw new IllegalArgumentException("Maximum versions must be positive");
+    }
+    setMaxVersions(maxVersions);
+    setInMemory(inMemory);
+    setBlockCacheEnabled(blockCacheEnabled);
+    setTimeToLive(timeToLive);
+    setCompressionType(Compression.Algorithm.
+      valueOf(compression.toUpperCase()));
+    setEncodeOnDisk(encodeOnDisk);
+    setDataBlockEncoding(DataBlockEncoding.
+        valueOf(dataBlockEncoding.toUpperCase()));
+//    setBloomFilterType(StoreFile.BloomType.
+//      valueOf(bloomFilter.toUpperCase()));
+    setBloomFilterErrorRate(bloomErrorRate);
+    setBlocksize(blocksize);
+    setScope(scope);
+  }
+
+  /**
+   * @param b Family name.
+   * @return <code>b</code>
+   * @throws IllegalArgumentException If not null and not a legitimate family
+   * name: i.e. 'printable' and containing no ':' (null passes are allowed because
+   * <code>b</code> can be null when deserializing).  Cannot start with a '.'
+   * either.
+   */
+  public static byte [] isLegalFamilyName(final byte [] b) {
+    if (b == null) {
+      return b;
+    }
+    if (b[0] == '.') {
+      throw new IllegalArgumentException("Family names cannot start with a " +
+        "period: " + Bytes.toString(b));
+    }
+    for (int i = 0; i < b.length; i++) {
+      if (Character.isISOControl(b[i]) || b[i] == ':') {
+        throw new IllegalArgumentException("Illegal character <" + b[i] +
+          ">. Family names cannot contain control characters or colons: " +
+          Bytes.toString(b));
+      }
+    }
+    return b;
+  }
+
+  public void setName(byte[] name) {
+    this.name = name;
+  }
+
+  /**
+   * @return Name of this column family
+   */
+  public byte [] getName() {
+    return name;
+  }
+
+  /**
+   * @return Name of this column family
+   */
+  public String getNameAsString() {
+    return Bytes.toString(this.name);
+  }
+
+  /**
+   * @param key The key.
+   * @return The value.
+   */
+  public byte[] getValue(byte[] key) {
+    ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
+    if (ibw == null)
+      return null;
+    return ibw.get();
+  }
+
+  /**
+   * @param key The key.
+   * @return The value as a string.
+   */
+  public String getValue(String key) {
+    byte[] value = getValue(Bytes.toBytes(key));
+    if (value == null)
+      return null;
+    return Bytes.toString(value);
+  }
+
+  /**
+   * @return All values.
+   */
+  public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
+    // shallow pointer copy
+    return Collections.unmodifiableMap(values);
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setValue(byte[] key, byte[] value) {
+    values.put(new ImmutableBytesWritable(key),
+      new ImmutableBytesWritable(value));
+    return this;
+  }
+
+  /**
+   * @param key Key whose key and value we're to remove from HCD parameters.
+   */
+  public void remove(final byte [] key) {
+    values.remove(new ImmutableBytesWritable(key));
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setValue(String key, String value) {
+    if (value == null) {
+      remove(Bytes.toBytes(key));
+    } else {
+      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+    }
+    return this;
+  }
+
+  /** @return compression type being used for the column family */
+  public Compression.Algorithm getCompression() {
+    String n = getValue(COMPRESSION);
+    if (n != null) {
+      return Compression.Algorithm.valueOf(n.toUpperCase());
+    } else {
+      return Compression.Algorithm.valueOf(DEFAULT_COMPRESSION);
+    }
+  }
+
+  /** @return maximum number of versions */
+  public synchronized int getMaxVersions() {
+    if (this.cachedMaxVersions == -1) {
+      String value = getValue(HConstants.VERSIONS);
+      this.cachedMaxVersions = (value != null)?
+        Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
+    }
+    return this.cachedMaxVersions;
+  }
+
+  /**
+   * @param maxVersions maximum number of versions
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setMaxVersions(int maxVersions) {
+    setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
+    cachedMaxVersions = maxVersions;
+    return this;
+  }
+
+  /**
+   * @return Blocksize.
+   */
+  public synchronized int getBlocksize() {
+    if (this.blocksize == null) {
+      String value = getValue(BLOCKSIZE);
+      this.blocksize = (value != null)?
+        Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
+    }
+    return this.blocksize.intValue();
+  }
+
+  /**
+   * @param s Blocksize to use when writing out storefiles/hfiles on this
+   * column family.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setBlocksize(int s) {
+    setValue(BLOCKSIZE, Integer.toString(s));
+    this.blocksize = null;
+    return this;
+  }
+
+  /**
+   * @return Compression type setting.
+   */
+  public Compression.Algorithm getCompressionType() {
+    return getCompression();
+  }
+
+  /**
+   * Compression types supported in hbase.
+   * LZO is not bundled as part of the hbase distribution.
+   * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
+   * for how to enable it.
+   * @param type Compression type setting.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
+    String compressionType = type.getName().toUpperCase();
+    return setValue(COMPRESSION, compressionType);
+  }
+
+  /**
+   * @param compressionTypeStr compression type as a string
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setCompressionType(String compressionTypeStr) {
+    return setCompressionType(
+        Compression.Algorithm.valueOf(compressionTypeStr.toUpperCase()));
+  }
+
+  /** @return data block encoding algorithm used on disk */
+  public DataBlockEncoding getDataBlockEncodingOnDisk() {
+    String encodeOnDiskStr = getValue(ENCODE_ON_DISK);
+    boolean encodeOnDisk;
+    if (encodeOnDiskStr == null) {
+      encodeOnDisk = DEFAULT_ENCODE_ON_DISK;
+    } else {
+      encodeOnDisk = Boolean.valueOf(encodeOnDiskStr);
+    }
+
+    if (!encodeOnDisk) {
+      // No encoding on disk.
+      return DataBlockEncoding.NONE;
+    }
+    return getDataBlockEncoding();
+  }
+
+  /**
+   * Set the flag indicating that we only want to encode data block in cache
+   * but not on disk.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
+    return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk));
+  }
+
+  /**
+   * @return the data block encoding algorithm used in block cache and
+   *         optionally on disk
+   */
+  public DataBlockEncoding getDataBlockEncoding() {
+    String type = getValue(DATA_BLOCK_ENCODING);
+    if (type == null) {
+      type = DEFAULT_DATA_BLOCK_ENCODING;
+    }
+    return DataBlockEncoding.valueOf(type);
+  }
+
+  /**
+   * Set data block encoding algorithm used in block cache.
+   * @param type What kind of data block encoding will be used.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
+    String name;
+    if (type != null) {
+      name = type.toString();
+    } else {
+      name = DataBlockEncoding.NONE.toString();
+    }
+    return setValue(DATA_BLOCK_ENCODING, name);
+  }
+
+  /**
+   * @return True if we are to keep all values in the HRegionServer cache.
+   */
+  public boolean isInMemory() {
+    String value = getValue(HConstants.IN_MEMORY);
+    if (value != null)
+      return Boolean.valueOf(value).booleanValue();
+    return DEFAULT_IN_MEMORY;
+  }
+
+  /**
+   * @param inMemory True if we are to keep all values in the HRegionServer
+   * cache
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setInMemory(boolean inMemory) {
+    return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
+  }
+
+  /**
+   * @return Time-to-live of cell contents, in seconds.
+   */
+  public int getTimeToLive() {
+    String value = getValue(TTL);
+    return (value != null) ? Integer.valueOf(value).intValue() : DEFAULT_TTL;
+  }
+
+  /**
+   * @param timeToLive Time-to-live of cell contents, in seconds.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setTimeToLive(int timeToLive) {
+    return setValue(TTL, Integer.toString(timeToLive));
+  }
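+  // Sketch: the TTL is stored in seconds, e.g. a one-week retention:
+  //
+  //   hcd.setTimeToLive(7 * 24 * 60 * 60);   // 604800 seconds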
+
+  /**
+   * @return the time in seconds for how far back in the past flashback
+   *         queries are supported.
+   */
+  public int getFlashBackQueryLimit() {
+    String value = getValue(FLASHBACK_QUERY_LIMIT);
+    return (value != null) ? Integer.valueOf(value).intValue()
+        : DEFAULT_FLASHBACK_QUERY_LIMIT;
+  }
+
+  /**
+   * @param flashBackQueryLimit
+   *          the time in seconds for how far back in the past flashback
+   *          queries are supported.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setFlashBackQueryLimit(int flashBackQueryLimit) {
+    if (flashBackQueryLimit < 0) {
+      throw new IllegalArgumentException(
+          "FlashBackQueryLimit cannot be negative");
+    }
+    return setValue(FLASHBACK_QUERY_LIMIT,
+        Integer.toString(flashBackQueryLimit));
+  }
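+  // Sketch: a one-day flashback window; the setter rejects negative values
+  // with IllegalArgumentException:
+  //
+  //   hcd.setFlashBackQueryLimit(24 * 60 * 60);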
+
+  /**
+   * @return True if HFile blocks should be cached.
+   */
+  public boolean isBlockCacheEnabled() {
+    String value = getValue(BLOCKCACHE);
+    if (value != null) {
+      return Boolean.valueOf(value).booleanValue();
+    }
+    return DEFAULT_BLOCKCACHE;
+  }
+
+  /**
+   * @param blockCacheEnabled True if HFile blocks should be cached.
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
+    return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
+  }
+
+//  /**
+//   * @return bloom filter type used for new StoreFiles in ColumnFamily
+//   */
+//  public StoreFile.BloomType getBloomFilterType() {
+//    String n = getValue(BLOOMFILTER);
+//    if (n == null) {
+//      n = DEFAULT_BLOOMFILTER;
+//    }
+//    return StoreFile.BloomType.valueOf(n.toUpperCase());
+//  }
+
+  /**
+   * Parse an integer from the string value stored under the given key,
+   * falling back to the supplied default if the key is absent or the value
+   * does not parse.
+   */
+  public int getIntValueFromString(String key, int defaultValue,
+      String parseErrorMessage) {
+    String n = getValue(key);
+    int returnValue = defaultValue;
+    if (n != null) {
+      try {
+        returnValue = Integer.parseInt(n);
+      } catch (NumberFormatException e) {
+        LOG.error("Invalid input " + n + ". " + parseErrorMessage, e);
+      }
+    }
+    return returnValue;
+  }
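+  // Sketch: a malformed stored value logs the error and yields the default
+  // (the key name here is hypothetical):
+  //
+  //   hcd.setValue("hbase.example.key", "not-a-number");
+  //   int v = hcd.getIntValueFromString("hbase.example.key", 42, "bad int");
+  //   // v == 42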
+
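+  // Note: HFileHistogram support appears disabled in this copy of the class;
+  // the bucket count is hard-coded to 0 and the original lookup is left
+  // commented out below.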
+  public int getHFileHistogramBucketCount() {
+    return 0;
+//      getIntValueFromString(
+//        HFILEHISTOGRAM_BUCKET_COUNT,
+//        HFileHistogram.DEFAULT_HFILEHISTOGRAM_BINCOUNT,
+//        "Cannot parse the histogram bin count");
+  }
+
+  /**
+   * @return the number of bytes as row key prefix for the bloom filter
+   */
+  public int getRowPrefixLengthForBloom() {
+    return getIntValueFromString(
+        ROWKEY_PREFIX_LENGTH_FOR_BLOOMFILTER,
+        DEFAULT_ROWKEY_PREFIX_LENGTH_FOR_BLOOM,
+        "Cannot parse row key prefix length");
+  }
+
+  public void setRowKeyPrefixLengthForBloom(int prefixLength) {
+    if (prefixLength > 0) {
+      setValue(ROWKEY_PREFIX_LENGTH_FOR_BLOOMFILTER, String.valueOf(prefixLength));
+    }
+  }
+
+  public void setHFileHistogramBucketCount(int histogramBucketCount) {
+    if (histogramBucketCount > 0) {
+      setValue(
+          HFILEHISTOGRAM_BUCKET_COUNT,
+          String.valueOf(histogramBucketCount));
+    }
+  }
+
+//  /**
+//   * @param bt bloom filter type
+//   * @return this (for chained invocation)
+//   */
+//  public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) {
+//    return setValue(BLOOMFILTER, bt.toString());
+//  }
+
+//  /**
+//   * @param bloomTypeStr bloom filter type as a string
+//   * @return this (for chained invocation)
+//   */
+//  public HColumnDescriptor setBloomFilterType(String bloomTypeStr) {
+//    return setBloomFilterType(BloomType.valueOf(bloomTypeStr.toUpperCase()));
+//  }
+
+  public void setBloomFilterErrorRate(float bloomErrorRate) {
+    setValue(BLOOMFILTER_ERRORRATE, Float.toString(bloomErrorRate));
+  }
+
+  public float getBloomFilterErrorRate() {
+    String value = getValue(BLOOMFILTER_ERRORRATE);
+    return (value != null) ? Float.valueOf(value).floatValue()
+        : DEFAULT_BLOOMFILTER_ERROR_RATE;
+  }
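+  // Sketch: the error rate is the target false-positive probability for this
+  // family's bloom filters:
+  //
+  //   hcd.setBloomFilterErrorRate(0.01f);   // ~1% false positives
+  //   float rate = hcd.getBloomFilterErrorRate();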
+
+  /**
+   * @return the scope tag
+   */
+  public int getScope() {
+    String value = getValue(REPLICATION_SCOPE);
+    if (value != null) {
+      return Integer.valueOf(value).intValue();
+    }
+    return DEFAULT_REPLICATION_SCOPE;
+  }
+
+  /**
+   * @param scope the scope tag
+   * @return this (for chained invocation)
+   */
+  public HColumnDescriptor setScope(int scope) {
+    return setValue(REPLICATION_SCOPE, Integer.toString(scope));
+  }
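+  // Sketch: in HBase replication the scope tag is conventionally 0 for a
+  // family that is not replicated and 1 for one that is; this class only
+  // stores the tag:
+  //
+  //   hcd.setScope(1);   // mark the family for replication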
+
+  /**
+   * @see java.lang.Object#toString()
+   */
+  @Override
+  public String toString() {
+    StringBuilder s = new StringBuilder();
+    s.append('{');
+    s.append(HConstants.NAME);
+    s.append(" => '");
+    s.append(Bytes.toString(name));
+    s.append("'");
+    s.append(getValues(true));
+    s.append('}');
+    return s.toString();
+  }
+
+  public String toStringCustomizedValues() {
+    StringBuilder s = new StringBuilder();
+    s.append('{');
+    s.append(HConstants.NAME);
+    s.append(" => '");
+    s.append(Bytes.toString(name));
+    s.append("'");
+    s.append(getValues(false));
+    s.append('}');
+    return s.toString();
+  }
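+  // Sketch of the rendered form for a family "cf" with a non-default TTL
+  // (toStringCustomizedValues() suppresses entries still at their defaults):
+  //
+  //   {NAME => 'cf', TTL => '604800'}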
+
+  private StringBuilder getValues(boolean printDefaults) {
+    StringBuilder s = new StringBuilder();
+
+    boolean hasAdvancedKeys = false;
+
+    // print all reserved keys first
+    for (ImmutableBytesWritable k : values.keySet()) {
+      if (!RESERVED_KEYWORDS.contains(k)) {
+        hasAdvancedKeys = true;
+        continue;
+      }
+      String key = Bytes.toString(k.get());
+      String value = Bytes.toString(values.get(k).get());
+      if (printDefaults
+          || !DEFAULT_VALUES.containsKey(key)
+          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
+        s.append(", ");
+        s.append(key);
+        s.append(" => ");
+        s.append('\'').append(value).append('\'');
+      }
+    }
+
+    // print all other keys as advanced options
+    if (hasAdvancedKeys) {
+      s.append(", ");
+      s.append(HConstants.CONFIG).append(" => ");
+      s.append('{');
+      boolean printComma = false;
+      for (ImmutableBytesWritable k : values.keySet()) {
+        if (RESERVED_KEYWORDS.contains(k)) {
+          continue;
+        }
+        String key = Bytes.toString(k.get());
+        String value = Bytes.toString(values.get(k).get());
+        if (printComma) {
+          s.append(", ");
+        }
+        printComma = true;
+        s.append('\'').append(key).append('\'');
+        s.append(" => ");
+        s.append('\'').append(value).append('\'');
+      }
+      s.append('}');
+    }
+    return s;
+  }
+
+  public static Map<String, String> getDefaultValues() {
+    return Collections.unmodifiableMap(DEFAULT_VALUES);
+  }
+
+  /**
+   * @see java.lang.Object#equals(java.lang.Object)
+   */
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof HColumnDescriptor)) {
+      return false;
+    }
+    return compareTo((HColumnDescriptor)obj) == 0;
+  }
+
+  /**
+   * @see java.lang.Object#hashCode()
+   */
+  @Override
+  public int hashCode() {
+    int result = Bytes.hashCode(this.name);
+    result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
+    result ^= values.hashCode();
+    return result;
+  }
+
+  // Writable
+
+  public void readFields(DataInput in) throws IOException {
+    int version = in.readByte();
+    if (version < 6) {
+      if (version <= 2) {
+        Text t = new Text();
+        t.readFields(in);
+        this.name = t.getBytes();
+//        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
+//            > 0) {
+//          this.name = stripColon(this.name);
+//        }
+      } else {
+        this.name = Bytes.readByteArray(in);
+      }
+      this.values.clear();
+      setMaxVersions(in.readInt());
+      int ordinal = in.readInt();
+      setCompressionType(Compression.Algorithm.values()[ordinal]);
+      setInMemory(in.readBoolean());
+//      setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
+//      if (getBloomFilterType() != BloomType.NONE && version < 5) {
+//        // If a bloomFilter is enabled and the column descriptor is less than
+//        // version 5, we need to skip over it to read the rest of the column
+//        // descriptor. There are no BloomFilterDescriptors written to disk for
+//        // column descriptors with a version number >= 5
+//        throw new UnsupportedClassVersionError(this.getClass().getName() +
+//            " does not support backward compatibility with versions older " +
+//            "than version 5");
+//      }
+      if (version > 1) {
+        setBlockCacheEnabled(in.readBoolean());
+      }
+      if (version > 2) {
+        setTimeToLive(in.readInt());
+      }
+    } else {
+      // version 6+
+      this.name = Bytes.readByteArray(in);
+      this.values.clear();
+      int numValues = in.readInt();
+      for (int i = 0; i < numValues; i++) {
+        ImmutableBytesWritable key = new ImmutableBytesWritable();
+        ImmutableBytesWritable value = new ImmutableBytesWritable();
+        key.readFields(in);
+        value.readFields(in);
+
+//        // in version 8, the BloomFilter setting changed from bool to enum
+//        if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
+//          value.set(Bytes.toBytes(
+//              Boolean.getBoolean(Bytes.toString(value.get()))
+//                ? BloomType.ROW.toString()
+//                : BloomType.NONE.toString()));
+//        }
+
+        values.put(key, value);
+      }
+      if (version == 6) {
+        // Convert old values.
+        setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
+      }
+    }
+  }
+
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(COLUMN_DESCRIPTOR_VERSION);
+    Bytes.writeByteArray(out, this.name);
+    out.writeInt(values.size());
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      e.getKey().write(out);
+      e.getValue().write(out);
+    }
+  }
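+  // Sketch of a Writable round trip through plain java.io streams (hcd is an
+  // already-populated descriptor):
+  //
+  //   ByteArrayOutputStream bos = new ByteArrayOutputStream();
+  //   hcd.write(new DataOutputStream(bos));
+  //   HColumnDescriptor copy = new HColumnDescriptor();
+  //   copy.readFields(new DataInputStream(
+  //       new ByteArrayInputStream(bos.toByteArray())));
+  //   // copy.compareTo(hcd) == 0, so copy.equals(hcd)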
+
+  // Comparable
+
+  public int compareTo(HColumnDescriptor o) {
+    int result = Bytes.compareTo(this.name, o.getName());
+    if (result == 0) {
+      // punt on comparison for ordering, just calculate difference
+      result = this.values.hashCode() - o.values.hashCode();
+      if (result < 0) {
+        result = -1;
+      } else if (result > 0) {
+        result = 1;
+      }
+    }
+    return result;
+  }
+}

