hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1344510 [1/2] - in /hbase/trunk: ./ bin/ hbase-assembly/ hbase-assembly/src/assembly/ hbase-common/ hbase-common/src/ hbase-common/src/main/ hbase-common/src/main/java/ hbase-common/src/main/java/org/ hbase-common/src/main/java/org/apache/...
Date Wed, 30 May 2012 23:51:45 GMT
Author: stack
Date: Wed May 30 23:51:44 2012
New Revision: 1344510

URL: http://svn.apache.org/viewvc?rev=1344510&view=rev
Log:
HBASE-6087 Add hbase-common module

Added:
    hbase/trunk/hbase-common/
    hbase/trunk/hbase-common/pom.xml
    hbase/trunk/hbase-common/src/
    hbase/trunk/hbase-common/src/main/
    hbase/trunk/hbase-common/src/main/java/
    hbase/trunk/hbase-common/src/main/java/org/
    hbase/trunk/hbase-common/src/main/java/org/apache/
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
Removed:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
Modified:
    hbase/trunk/bin/hbase
    hbase/trunk/hbase-assembly/pom.xml
    hbase/trunk/hbase-assembly/src/assembly/all.xml
    hbase/trunk/hbase-server/pom.xml
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
    hbase/trunk/hbase-site/src/docbkx/developer.xml
    hbase/trunk/pom.xml

Modified: hbase/trunk/bin/hbase
URL: http://svn.apache.org/viewvc/hbase/trunk/bin/hbase?rev=1344510&r1=1344509&r2=1344510&view=diff
==============================================================================
--- hbase/trunk/bin/hbase (original)
+++ hbase/trunk/bin/hbase Wed May 30 23:51:44 2012
@@ -137,6 +137,16 @@ add_to_cp_if_exists() {
   fi
 }
 
+# For releases, add hbase & webapps to CLASSPATH
+# Webapps must come first else it messes up Jetty
+if [ -d "$HBASE_HOME/hbase-webapps" ]; then
+  add_to_cp_if_exists "${HBASE_HOME}"
+fi
+#add if we are in a dev environment
+if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then
+  add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target"
+fi
+
 add_maven_deps_to_classpath() {
   # Need to generate classpath from maven pom. This is costly so generate it
   # and cache it. Save the file into our target dir so a mvn clean will get
@@ -168,23 +178,13 @@ add_maven_test_classes_to_classpath(){
   done
 }
 
-# Add maven target directory
+# Add the development environment classpath entries
 if $in_dev_env; then
   add_maven_deps_to_classpath
   add_maven_main_classes_to_classpath
   add_maven_test_classes_to_classpath
 fi
 
-# For releases, add hbase & webapps to CLASSPATH
-# Webapps must come first else it messes up Jetty
-if [ -d "$HBASE_HOME/hbase-webapps" ]; then
-  add_to_cp_if_exists "${HBASE_HOME}"
-fi
-#add if we are in a dev environment
-if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then
-  add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target"
-fi
-
 #add the hbase jars for each module
 for f in $HBASE_HOME/hbase-jars/hbase*.jar; do
 	if [[ $f = *sources.jar ]]

Modified: hbase/trunk/hbase-assembly/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-assembly/pom.xml?rev=1344510&r1=1344509&r2=1344510&view=diff
==============================================================================
--- hbase/trunk/hbase-assembly/pom.xml (original)
+++ hbase/trunk/hbase-assembly/pom.xml Wed May 30 23:51:44 2012
@@ -185,6 +185,10 @@
 
   <dependencies>
     <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
       <artifactId>hbase-server</artifactId>
       <groupId>org.apache.hbase</groupId>
       <scope>compile</scope>

Modified: hbase/trunk/hbase-assembly/src/assembly/all.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-assembly/src/assembly/all.xml?rev=1344510&r1=1344509&r2=1344510&view=diff
==============================================================================
--- hbase/trunk/hbase-assembly/src/assembly/all.xml (original)
+++ hbase/trunk/hbase-assembly/src/assembly/all.xml Wed May 30 23:51:44 2012
@@ -161,6 +161,7 @@
             <excludes>
               <!-- Add new modules here -->
               <exclude>hbase-server-${project.version}.jar</exclude>
+              <exclude>hbase-common-${project.version}.jar</exclude>
               <exclude>target/</exclude>
               <exclude>test/</exclude>
               <exclude>.classpath</exclude>

Added: hbase/trunk/hbase-common/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/pom.xml?rev=1344510&view=auto
==============================================================================
--- hbase/trunk/hbase-common/pom.xml (added)
+++ hbase/trunk/hbase-common/pom.xml Wed May 30 23:51:44 2012
@@ -0,0 +1,197 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>hbase</artifactId>
+    <groupId>org.apache.hbase</groupId>
+    <version>0.95-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+
+  <artifactId>hbase-common</artifactId>
+  <name>HBase - Common</name>
+  <description>Common functionality for HBase</description>
+
+  <build>
+    <pluginManagement>
+      <plugins>
+        <plugin>
+          <artifactId>maven-surefire-plugin</artifactId>
+          <!-- Always skip the second-part test execution, since we only
+          run simple unit tests in this module -->
+          <executions>
+            <execution>
+              <id>secondPartTestsExecution</id>
+              <phase>test</phase>
+              <goals>
+                <goal>test</goal>
+              </goals>
+              <configuration>
+                <skip>true</skip>
+              </configuration>
+            </execution>
+          </executions>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+  </build>
+
+  <dependencies>
+    <!-- General dependencies -->
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+  </dependencies>
+
+  <profiles>
+  <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+    the same time. -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-test</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+
+    <!--
+      profile for building against Hadoop 2.0.0-alpha. Activate using:
+       mvn -Dhadoop.profile=2.0
+    -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>2.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minicluster</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-mrapp-generated-classpath</id>
+                <phase>generate-test-resources</phase>
+                <goals>
+                  <goal>build-classpath</goal>
+                </goals>
+                <configuration>
+                  <!-- needed to run the unit test for DS to generate
+                  the classpath required in the env of the launch
+                  container in the mini mr/yarn cluster
+                  -->
+                  <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minicluster</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-mrapp-generated-classpath</id>
+                <phase>generate-test-resources</phase>
+                <goals>
+                  <goal>build-classpath</goal>
+                </goals>
+                <configuration>
+                  <!-- needed to run the unit test for DS to generate
+                  the classpath required in the env of the launch
+                  container in the mini mr/yarn cluster
+                  -->
+                  <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
\ No newline at end of file

Added: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java?rev=1344510&view=auto
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java (added)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java Wed May 30 23:51:44 2012
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.VersionInfo;
+
+/**
+ * Adds HBase configuration files to a Configuration
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class HBaseConfiguration extends Configuration {
+
+  private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class);
+
+  // a constant to convert a fraction to a percentage
+  private static final int CONVERT_TO_PERCENTAGE = 100;
+
+  /**
+   * Instantiating HBaseConfiguration() is deprecated. Please use
+   * HBaseConfiguration#create() to construct a plain Configuration
+   */
+  @Deprecated
+  public HBaseConfiguration() {
+    //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration
+    super();
+    addHbaseResources(this);
+    LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use"
+        + " HBaseConfiguration#create() to construct a plain Configuration");
+  }
+
+  /**
+   * Instantiating HBaseConfiguration() is deprecated. Please use
+   * HBaseConfiguration#create(conf) to construct a plain Configuration
+   */
+  @Deprecated
+  public HBaseConfiguration(final Configuration c) {
+    //TODO:replace with private constructor
+    this();
+    merge(this, c);
+  }
+
+  private static void checkDefaultsVersion(Configuration conf) {
+    if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return;
+    String defaultsVersion = conf.get("hbase.defaults.for.version");
+    String thisVersion = VersionInfo.getVersion();
+    if (!thisVersion.equals(defaultsVersion)) {
+      throw new RuntimeException(
+        "hbase-default.xml file seems to be for and old version of HBase (" +
+        defaultsVersion + "), this version is " + thisVersion);
+    }
+  }
+
+  private static void checkForClusterFreeMemoryLimit(Configuration conf) {
+      float globalMemstoreLimit = conf.getFloat("hbase.regionserver.global.memstore.upperLimit", 0.4f);
+      int gml = (int)(globalMemstoreLimit * CONVERT_TO_PERCENTAGE);
+      float blockCacheUpperLimit =
+        conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
+          HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
+      int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE);
+      if (CONVERT_TO_PERCENTAGE - (gml + bcul)
+              < (int)(CONVERT_TO_PERCENTAGE *
+                      HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) {
+          throw new RuntimeException(
+            "Current heap configuration for MemStore and BlockCache exceeds " +
+            "the threshold required for successful cluster operation. " +
+            "The combined value cannot exceed 0.8. Please check " +
+            "the settings for hbase.regionserver.global.memstore.upperLimit and " +
+            "hfile.block.cache.size in your configuration. " +
+            "hbase.regionserver.global.memstore.upperLimit is " +
+            globalMemstoreLimit +
+            " hfile.block.cache.size is " + blockCacheUpperLimit);
+      }
+  }
+
+  public static Configuration addHbaseResources(Configuration conf) {
+    conf.addResource("hbase-default.xml");
+    conf.addResource("hbase-site.xml");
+
+    checkDefaultsVersion(conf);
+    checkForClusterFreeMemoryLimit(conf);
+    return conf;
+  }
+
+  /**
+   * Creates a Configuration with HBase resources
+   * @return a Configuration with HBase resources
+   */
+  public static Configuration create() {
+    Configuration conf = new Configuration();
+    return addHbaseResources(conf);
+  }
+
+  /**
+   * Creates a clone of passed configuration.
+   * @param that Configuration to clone.
+   * @return a clone of passed configuration.
+   */
+  public static Configuration create(final Configuration that) {
+    return new Configuration(that);
+  }
+
+  /**
+   * Merge two configurations.
+   * @param destConf the configuration that will be overwritten with items
+   *                 from the srcConf
+   * @param srcConf the source configuration
+   **/
+  public static void merge(Configuration destConf, Configuration srcConf) {
+    for (Entry<String, String> e : srcConf) {
+      destConf.set(e.getKey(), e.getValue());
+    }
+  }
+
+  /**
+   * @return whether to show HBase Configuration in servlet
+   */
+  public static boolean isShowConfInServlet() {
+    boolean isShowConf = false;
+    try {
+      if (Class.forName("org.apache.hadoop.conf.ConfServlet") != null) {
+        isShowConf = true;
+      }
+    } catch (Exception e) {
+      // ConfServlet is not on the classpath; leave isShowConf false.
+    }
+    return isShowConf;
+  }
+}
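
For context, a minimal sketch of how client code is expected to use the class above in place of the deprecated constructors. This is a hypothetical example, not part of this commit; the quorum host name is made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ConfigExample {
      public static void main(String[] args) {
        // create() loads hbase-default.xml and hbase-site.xml and runs the
        // version and free-memory sanity checks defined above.
        Configuration conf = HBaseConfiguration.create();

        // merge() overlays every entry of the source onto the destination.
        Configuration overrides = new Configuration(false);
        overrides.set("hbase.zookeeper.quorum", "zk1.example.com"); // hypothetical host
        HBaseConfiguration.merge(conf, overrides);

        System.out.println(conf.get("hbase.zookeeper.quorum"));
      }
    }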

Added: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1344510&view=auto
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (added)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Wed May 30 23:51:44 2012
@@ -0,0 +1,674 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * HConstants holds a bunch of HBase-related constants
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public final class HConstants {
+  /**
+   * Status codes used for return values of bulk operations.
+   */
+  public enum OperationStatusCode {
+    NOT_RUN,
+    SUCCESS,
+    SANITY_CHECK_FAILURE,
+    FAILURE;
+  }
+
+  /** long constant for zero */
+  public static final Long ZERO_L = Long.valueOf(0L);
+  public static final String NINES = "99999999999999";
+  public static final String ZEROES = "00000000000000";
+
+  // For migration
+
+  /** name of version file */
+  public static final String VERSION_FILE_NAME = "hbase.version";
+
+  /**
+   * Current version of file system.
+   * Version 4 supports only one kind of bloom filter.
+   * Version 5 changes versions in catalog table regions.
+   * Version 6 enables blockcaching on catalog tables.
+   * Version 7 introduces hfile -- hbase 0.19 to 0.20..
+   */
+  // public static final String FILE_SYSTEM_VERSION = "6";
+  public static final String FILE_SYSTEM_VERSION = "7";
+
+  // Configuration parameters
+
+  //TODO: Is having HBase homed on port 60k OK?
+
+  /** Cluster is in distributed mode or not */
+  public static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed";
+
+  /** Config for pluggable load balancers */
+  public static final String HBASE_MASTER_LOADBALANCER_CLASS = "hbase.master.loadbalancer.class";
+
+  /** Cluster is standalone or pseudo-distributed */
+  public static final boolean CLUSTER_IS_LOCAL = false;
+
+  /** Cluster is fully-distributed */
+  public static final boolean CLUSTER_IS_DISTRIBUTED = true;
+
+  /** Default value for cluster distributed mode */
+  public static final boolean DEFAULT_CLUSTER_DISTRIBUTED = CLUSTER_IS_LOCAL;
+
+  /** default host address */
+  public static final String DEFAULT_HOST = "0.0.0.0";
+
+  /** Parameter name for port master listens on. */
+  public static final String MASTER_PORT = "hbase.master.port";
+
+  /** default port that the master listens on */
+  public static final int DEFAULT_MASTER_PORT = 60000;
+
+  /** default port for master web api */
+  public static final int DEFAULT_MASTER_INFOPORT = 60010;
+
+  /** Configuration key for master web API port */
+  public static final String MASTER_INFO_PORT = "hbase.master.info.port";
+
+  /** Parameter name for the master type being backup (waits for primary to go inactive). */
+  public static final String MASTER_TYPE_BACKUP = "hbase.master.backup";
+
+  /** by default every master is a possible primary master unless the conf explicitly overrides it */
+  public static final boolean DEFAULT_MASTER_TYPE_BACKUP = false;
+
+  /** Parameter name for ZooKeeper session time out.*/
+  public static final String ZOOKEEPER_SESSION_TIMEOUT =
+    "zookeeper.session.timeout";
+
+  /** Name of ZooKeeper quorum configuration parameter. */
+  public static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";
+
+  /** Name of ZooKeeper config file in conf/ directory. */
+  public static final String ZOOKEEPER_CONFIG_NAME = "zoo.cfg";
+
+  /** Common prefix of ZooKeeper configuration properties */
+  public static final String ZK_CFG_PROPERTY_PREFIX =
+      "hbase.zookeeper.property.";
+
+  public static final int ZK_CFG_PROPERTY_PREFIX_LEN =
+      ZK_CFG_PROPERTY_PREFIX.length();
+
+  /**
+   * The ZK client port key in the ZK properties map. The name reflects the
+   * fact that this is not an HBase configuration key.
+   */
+  public static final String CLIENT_PORT_STR = "clientPort";
+
+  /** Parameter name for the client port that the zookeeper listens on */
+  public static final String ZOOKEEPER_CLIENT_PORT =
+      ZK_CFG_PROPERTY_PREFIX + CLIENT_PORT_STR;
+
+  /** Default client port that the zookeeper listens on */
+  public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181;
+
+  /** Parameter name for the wait time for the recoverable zookeeper */
+  public static final String ZOOKEEPER_RECOVERABLE_WAITTIME = "hbase.zookeeper.recoverable.waittime";
+
+  /** Default wait time for the recoverable zookeeper */
+  public static final long DEFAULT_ZOOKEPER_RECOVERABLE_WAITIME = 10000;
+
+  /** Parameter name for the root dir in ZK for this cluster */
+  public static final String ZOOKEEPER_ZNODE_PARENT = "zookeeper.znode.parent";
+
+  public static final String DEFAULT_ZOOKEEPER_ZNODE_PARENT = "/hbase";
+
+  /**
+   * Parameter name for the limit on concurrent client-side zookeeper
+   * connections
+   */
+  public static final String ZOOKEEPER_MAX_CLIENT_CNXNS =
+      ZK_CFG_PROPERTY_PREFIX + "maxClientCnxns";
+
+  /** Parameter name for the ZK data directory */
+  public static final String ZOOKEEPER_DATA_DIR =
+      ZK_CFG_PROPERTY_PREFIX + "dataDir";
+
+  /** Default limit on concurrent client-side zookeeper connections */
+  public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 300;
+
+  /** Configuration key for ZooKeeper session timeout */
+  public static final String ZK_SESSION_TIMEOUT = "zookeeper.session.timeout";
+
+  /** Default value for ZooKeeper session timeout */
+  public static final int DEFAULT_ZK_SESSION_TIMEOUT = 180 * 1000;
+
+  /** Parameter name for port region server listens on. */
+  public static final String REGIONSERVER_PORT = "hbase.regionserver.port";
+
+  /** Default port region server listens on. */
+  public static final int DEFAULT_REGIONSERVER_PORT = 60020;
+
+  /** default port for region server web api */
+  public static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
+
+  /** A configuration key for regionserver info port */
+  public static final String REGIONSERVER_INFO_PORT =
+    "hbase.regionserver.info.port";
+
+  /** A flag that enables automatic selection of regionserver info port */
+  public static final String REGIONSERVER_INFO_PORT_AUTO =
+      REGIONSERVER_INFO_PORT + ".auto";
+
+  /** Parameter name for what region server implementation to use. */
+  public static final String REGION_SERVER_IMPL = "hbase.regionserver.impl";
+
+  /** Parameter name for what master implementation to use. */
+  public static final String MASTER_IMPL = "hbase.master.impl";
+
+  /** Parameter name for how often threads should wake up */
+  public static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
+
+  /** Default value for thread wake frequency */
+  public static final int DEFAULT_THREAD_WAKE_FREQUENCY = 10 * 1000;
+
+  /** Parameter name for how often we should try to write a version file, before failing */
+  public static final String VERSION_FILE_WRITE_ATTEMPTS = "hbase.server.versionfile.writeattempts";
+
+  /** Parameter name for how often we should try to write a version file, before failing */
+  public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3;
+
+  /** Parameter name for how often a region should should perform a major compaction */
+  public static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction";
+
+  /** Parameter name for HBase instance root directory */
+  public static final String HBASE_DIR = "hbase.rootdir";
+
+  /** Parameter name for HBase client IPC pool type */
+  public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type";
+
+  /** Parameter name for HBase client IPC pool size */
+  public static final String HBASE_CLIENT_IPC_POOL_SIZE = "hbase.client.ipc.pool.size";
+
+  /** Parameter name for HBase client operation timeout, which overrides RPC timeout */
+  public static final String HBASE_CLIENT_OPERATION_TIMEOUT = "hbase.client.operation.timeout";
+
+  /** Default HBase client operation timeout, which is tantamount to a blocking call */
+  public static final int DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = Integer.MAX_VALUE;
+
+  /** Used to construct the name of the log directory for a region server.
+   * Use '.' as a special character to separate the log files from table data */
+  public static final String HREGION_LOGDIR_NAME = ".logs";
+
+  /** Used to construct the name of the splitlog directory for a region server */
+  public static final String SPLIT_LOGDIR_NAME = "splitlog";
+
+  public static final String CORRUPT_DIR_NAME = ".corrupt";
+
+  /** Like the previous, but for old logs that are about to be deleted */
+  public static final String HREGION_OLDLOGDIR_NAME = ".oldlogs";
+
+  /** Used to construct the name of the compaction directory during compaction */
+  public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
+
+  /** Conf key for the max file size after which we split the region */
+  public static final String HREGION_MAX_FILESIZE =
+      "hbase.hregion.max.filesize";
+
+  /** Default maximum file size */
+  public static final long DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+
+  /**
+   * The max number of threads used for opening and closing stores or store
+   * files in parallel
+   */
+  public static final String HSTORE_OPEN_AND_CLOSE_THREADS_MAX =
+    "hbase.hstore.open.and.close.threads.max";
+
+  /**
+   * The default number for the max number of threads used for opening and
+   * closing stores or store files in parallel
+   */
+  public static final int DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX = 1;
+
+
+  /** Conf key for the memstore size at which we flush the memstore */
+  public static final String HREGION_MEMSTORE_FLUSH_SIZE =
+      "hbase.hregion.memstore.flush.size";
+
+  /** Default size of a reservation block   */
+  public static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5;
+
+  /** Maximum value length, enforced on KeyValue construction */
+  public static final int MAXIMUM_VALUE_LENGTH = Integer.MAX_VALUE;
+
+  /** name of the file for unique cluster ID */
+  public static final String CLUSTER_ID_FILE_NAME = "hbase.id";
+
+  /** Configuration key storing the cluster ID */
+  public static final String CLUSTER_ID = "hbase.cluster.id";
+
+  // Always store the location of the root table's HRegion.
+  // This HRegion is never split.
+
+  // region name = table + startkey + regionid. This is the row key.
+  // each row in the root and meta tables describes exactly 1 region
+  // Do we ever need to know all the information that we are storing?
+
+  // Note that the name of the root table starts with "-" and the name of the
+  // meta table starts with "." Why? It's a trick. It turns out that when we
+  // store region names in memory, we use a SortedMap. Since "-" sorts before
+  // "." (and since no other table name can start with either of these
+  // characters), the root region will always be the first entry in such a Map,
+  // followed by all the meta regions (which will be ordered by their starting
+  // row key as well), followed by all user tables. So when the Master is
+  // choosing regions to assign, it will always choose the root region first,
+  // followed by the meta regions, followed by user regions. Since the root
+  // and meta regions always need to be on-line, this ensures that they will
+  // be the first to be reassigned if the server(s) they are being served by
+  // should go down.
+
+  /** The root table's name.*/
+  public static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
+
+  /** The META table's name. */
+  public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
+
+  /** delimiter used between portions of a region name */
+  public static final int META_ROW_DELIMITER = ',';
+
+  /** The catalog family as a string*/
+  public static final String CATALOG_FAMILY_STR = "info";
+
+  /** The catalog family */
+  public static final byte [] CATALOG_FAMILY = Bytes.toBytes(CATALOG_FAMILY_STR);
+
+  /** The RegionInfo qualifier as a string */
+  public static final String REGIONINFO_QUALIFIER_STR = "regioninfo";
+
+  /** The regioninfo column qualifier */
+  public static final byte [] REGIONINFO_QUALIFIER =
+    Bytes.toBytes(REGIONINFO_QUALIFIER_STR);
+
+  /** The server column qualifier */
+  public static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server");
+
+  /** The startcode column qualifier */
+  public static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode");
+
+  /** The lower-half split region column qualifier */
+  public static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA");
+
+  /** The upper-half split region column qualifier */
+  public static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB");
+
+  /**
+   * The meta table version column qualifier.
+   * We keep current version of the meta table in this column in <code>-ROOT-</code>
+   * table: i.e. in the 'info:v' column.
+   */
+  public static final byte [] META_VERSION_QUALIFIER = Bytes.toBytes("v");
+
+  /**
+   * The current version of the meta table.
+   * Before this the meta had HTableDescriptor serialized into the HRegionInfo;
+   * i.e. pre-hbase 0.92.  There was no META_VERSION column in the root table
+   * in this case.  The presence of a version and its value being zero indicates
+   * meta is up-to-date.
+   */
+  public static final short META_VERSION = 0;
+
+  // Other constants
+
+  /**
+   * An empty instance.
+   */
+  public static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
+
+  /**
+   * Used by scanners, etc when they want to start at the beginning of a region
+   */
+  public static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
+
+  /**
+   * Last row in a table.
+   */
+  public static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
+
+  /**
+   * Used by scanners and others when they're trying to detect the end of a
+   * table
+   */
+  public static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
+
+  /**
+   * Max length a row can have because of the limitation in TFile.
+   */
+  public static final int MAX_ROW_LENGTH = Short.MAX_VALUE;
+
+  /** When we encode strings, we always specify UTF8 encoding */
+  public static final String UTF8_ENCODING = "UTF-8";
+
+  /**
+   * Timestamp to use when we want to refer to the latest cell.
+   * This is the timestamp sent by clients when no timestamp is specified on
+   * commit.
+   */
+  public static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
+
+  /**
+   * Timestamp to use when we want to refer to the oldest cell.
+   */
+  public static final long OLDEST_TIMESTAMP = Long.MIN_VALUE;
+
+  /**
+   * LATEST_TIMESTAMP in bytes form
+   */
+  public static final byte [] LATEST_TIMESTAMP_BYTES = Bytes.toBytes(LATEST_TIMESTAMP);
+
+  /**
+   * Define for 'return-all-versions'.
+   */
+  public static final int ALL_VERSIONS = Integer.MAX_VALUE;
+
+  /**
+   * Unlimited time-to-live.
+   */
+//  public static final int FOREVER = -1;
+  public static final int FOREVER = Integer.MAX_VALUE;
+
+  /**
+   * Seconds in a week
+   */
+  public static final int WEEK_IN_SECONDS = 7 * 24 * 3600;
+
+  //TODO: although the following are referenced widely to format strings for
+  //      the shell. They really aren't a part of the public API. It would be
+  //      nice if we could put them somewhere where they did not need to be
+  //      public. They could have package visibility
+  public static final String NAME = "NAME";
+  public static final String VERSIONS = "VERSIONS";
+  public static final String IN_MEMORY = "IN_MEMORY";
+  public static final String CONFIG = "CONFIG";
+
+  /**
+   * This is a retry backoff multiplier table similar to the BSD TCP syn
+   * backoff table, a bit more aggressive than simple exponential backoff.
+   */
+  public static int[] RETRY_BACKOFF = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 };
+
+  public static final String REGION_IMPL = "hbase.hregion.impl";
+
+  /** modifyTable op for replacing the table descriptor */
+  public static enum Modify {
+    CLOSE_REGION,
+    TABLE_COMPACT,
+    TABLE_FLUSH,
+    TABLE_MAJOR_COMPACT,
+    TABLE_SET_HTD,
+    TABLE_SPLIT
+  }
+
+  /**
+   * Scope tag for locally scoped data.
+   * This data will not be replicated.
+   */
+  public static final int REPLICATION_SCOPE_LOCAL = 0;
+
+  /**
+   * Scope tag for globally scoped data.
+   * This data will be replicated to all peers.
+   */
+  public static final int REPLICATION_SCOPE_GLOBAL = 1;
+
+  /**
+   * Default cluster ID, cannot be used to identify a cluster so a key with
+   * this value means it wasn't meant for replication.
+   */
+  public static final UUID DEFAULT_CLUSTER_ID = new UUID(0L,0L);
+
+  /**
+   * Parameter name for maximum number of bytes returned when calling a
+   * scanner's next method.
+   */
+  public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size";
+
+  /**
+   * Maximum number of bytes returned when calling a scanner's next method.
+   * Note that when a single row is larger than this limit the row is still
+   * returned completely.
+   *
+   * The default value is unlimited.
+   */
+  public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE;
+
+  /**
+   * Parameter name for client pause value, used mostly as value to wait
+   * before running a retry of a failed get, region lookup, etc.
+   */
+  public static String HBASE_CLIENT_PAUSE = "hbase.client.pause";
+
+  /**
+   * Default value of {@link #HBASE_CLIENT_PAUSE}.
+   */
+  public static long DEFAULT_HBASE_CLIENT_PAUSE = 1000;
+
+  /**
+   * Parameter name for maximum retries, used as maximum for all retryable
+   * operations such as fetching of the root region from root region server,
+   * getting a cell's value, starting a row update, etc.
+   */
+  public static String HBASE_CLIENT_RETRIES_NUMBER = "hbase.client.retries.number";
+
+  /**
+   * Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}.
+   */
+  public static int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
+
+  /**
+   * Parameter name for maximum attempts, used to limit the number of times the
+   * client will try to obtain the proxy for a given region server.
+   */
+  public static String HBASE_CLIENT_RPC_MAXATTEMPTS = "hbase.client.rpc.maxattempts";
+
+  /**
+   * Default value of {@link #HBASE_CLIENT_RPC_MAXATTEMPTS}.
+   */
+  public static int DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS = 1;
+
+  /**
+   * Parameter name for client prefetch limit, used as the maximum number of regions
+   * info that will be prefetched.
+   */
+  public static String HBASE_CLIENT_PREFETCH_LIMIT = "hbase.client.prefetch.limit";
+
+  /**
+   * Default value of {@link #HBASE_CLIENT_PREFETCH_LIMIT}.
+   */
+  public static int DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT = 10;
+
+  /**
+   * Parameter name for number of rows that will be fetched when calling next on
+   * a scanner if it is not served from memory. Higher caching values will
+   * enable faster scanners but will eat up more memory and some calls of next
+   * may take longer and longer times when the cache is empty.
+   */
+  public static String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
+
+  /**
+   * Default value of {@link #HBASE_META_SCANNER_CACHING}.
+   */
+  public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
+
+  /**
+   * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
+   * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
+   * for all intents and purposes, are the same except for their instance ids,
+   * then they will not be able to share the same {@link org.apache.hadoop.hbase.client.HConnection} instance.
+   * On the other hand, even if the instance ids are the same, it could result
+   * in non-shared {@link org.apache.hadoop.hbase.client.HConnection}
+   * instances if some of the other connection parameters differ.
+   */
+  public static String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
+
+  /**
+   * HRegion server lease period in milliseconds. Clients must report in within this period
+   * else they are considered dead.
+   */
+  public static String HBASE_REGIONSERVER_LEASE_PERIOD_KEY =
+    "hbase.regionserver.lease.period";
+
+  /**
+   * Default value of {@link #HBASE_REGIONSERVER_LEASE_PERIOD_KEY}.
+   */
+  public static long DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD = 60000;
+
+  /**
+   * timeout for each RPC
+   */
+  public static String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout";
+
+  /**
+   * Default value of {@link #HBASE_RPC_TIMEOUT_KEY}
+   */
+  public static int DEFAULT_HBASE_RPC_TIMEOUT = 60000;
+
+  /*
+   * cluster replication constants.
+   */
+  public static final String
+      REPLICATION_ENABLE_KEY = "hbase.replication";
+  public static final String
+      REPLICATION_SOURCE_SERVICE_CLASSNAME = "hbase.replication.source.service";
+  public static final String
+      REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service";
+  public static final String REPLICATION_SERVICE_CLASSNAME_DEFAULT =
+    "org.apache.hadoop.hbase.replication.regionserver.Replication";
+
+  /** HBCK special code name used as server name when manipulating ZK nodes */
+  public static final String HBCK_CODE_NAME = "HBCKServerName";
+
+  public static final String KEY_FOR_HOSTNAME_SEEN_BY_MASTER =
+    "hbase.regionserver.hostname.seen.by.master";
+
+  public static final String HBASE_MASTER_LOGCLEANER_PLUGINS =
+      "hbase.master.logcleaner.plugins";
+
+  public static final String HBASE_REGION_SPLIT_POLICY_KEY =
+    "hbase.regionserver.region.split.policy";
+
+  /**
+   * Configuration key for the size of the block cache
+   */
+  public static final String HFILE_BLOCK_CACHE_SIZE_KEY =
+    "hfile.block.cache.size";
+
+  public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.25f;
+
+  /*
+   * Minimum percentage of free heap necessary for a successful cluster startup.
+   */
+  public static final float HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD = 0.2f;
+
+  public static final List<String> HBASE_NON_USER_TABLE_DIRS = new ArrayList<String>(
+      Arrays.asList(new String[]{ HREGION_LOGDIR_NAME, HREGION_OLDLOGDIR_NAME,
+          CORRUPT_DIR_NAME, Bytes.toString(META_TABLE_NAME),
+          Bytes.toString(ROOT_TABLE_NAME), SPLIT_LOGDIR_NAME }));
+
+  public static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile
+      ("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
+  public static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
+      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
+
+  public static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
+  public static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
+  public static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
+      "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
+      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
+
+  /** The delay when re-trying a socket operation in a loop (HBASE-4712) */
+  public static final int SOCKET_RETRY_WAIT_MS = 200;
+
+  /** Host name of the local machine */
+  public static final String LOCALHOST = "localhost";
+
+  /**
+   * If this parameter is set to true, then hbase will read
+   * data and then verify checksums. Checksum verification
+   * inside hdfs will be switched off.  However, if the hbase-checksum
+   * verification fails, then it will switch back to using
+   * hdfs checksums for verifiying data that is being read from storage.
+   *
+   * If this parameter is set to false, then hbase will not
+   * verify any checksums, instead it will depend on checksum verification
+   * being done in the hdfs client.
+   */
+  public static final String HBASE_CHECKSUM_VERIFICATION =
+      "hbase.regionserver.checksum.verify";
+
+  public static final String LOCALHOST_IP = "127.0.0.1";
+
+  /** Conf key that enables distributed log splitting */
+  public static final String DISTRIBUTED_LOG_SPLITTING_KEY =
+      "hbase.master.distributed.log.splitting";
+
+  /**
+   * The name of the configuration parameter that specifies
+   * the number of bytes in a newly created checksum chunk.
+   */
+  public static final String BYTES_PER_CHECKSUM =
+      "hbase.hstore.bytes.per.checksum";
+
+  /**
+   * The name of the configuration parameter that specifies
+   * the name of an algorithm that is used to compute checksums
+   * for newly created blocks.
+   */
+  public static final String CHECKSUM_TYPE_NAME =
+      "hbase.hstore.checksum.algorithm";
+
+  /** Enable file permission modification from standard hbase */
+  public static final String ENABLE_DATA_FILE_UMASK = "hbase.data.umask.enable";
+  /** File permission umask to use when creating hbase data files */
+  public static final String DATA_FILE_UMASK_KEY = "hbase.data.umask";
+
+  /** Configuration name of HLog Compression */
+  public static final String ENABLE_WAL_COMPRESSION =
+    "hbase.regionserver.wal.enablecompression";
+
+  /** Region in Transition metrics threshold time */
+  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
+      "hbase.metrics.rit.stuck.warning.threshold";
+
+  public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop";
+
+  /**
+   * The byte array representation of NO_NEXT_INDEXED_KEY.
+   * The actual value is irrelevant because this is always compared by reference.
+   */
+  public static final byte [] NO_NEXT_INDEXED_KEY = Bytes.toBytes("NO_NEXT_INDEXED_KEY");
+
+  private HConstants() {
+    // Can't be instantiated with this ctor.
+  }
+}
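
For context, a minimal sketch showing how the constants above are meant to be read through a Configuration rather than via bare strings. This is a hypothetical example, not part of this commit; all names come from the class above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class ConstantsExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keys and defaults are paired constants, so callers never repeat
        // the literal property names.
        String quorum = conf.get(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
        int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
            HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        boolean distributed = conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED,
            HConstants.DEFAULT_CLUSTER_DISTRIBUTED);
        System.out.println(quorum + " rpcTimeout=" + rpcTimeout
            + " distributed=" + distributed);
      }
    }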

Added: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java?rev=1344510&view=auto
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java (added)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java Wed May 30 23:51:44 2012
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.lang.annotation.*;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A package attribute that captures the version of hbase that was compiled.
+ * Copied down from Hadoop.  All is the same except the name of the interface.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.PACKAGE)
+@InterfaceAudience.Private
+public @interface VersionAnnotation {
+
+  /**
+   * Get the HBase version.
+   * @return the version string, e.g. "0.6.3-dev"
+   */
+  String version();
+
+  /**
+   * Get the username that compiled HBase.
+   */
+  String user();
+
+  /**
+   * Get the date when HBase was compiled.
+   * @return the date in unix 'date' format
+   */
+  String date();
+
+  /**
+   * Get the url for the subversion repository.
+   */
+  String url();
+
+  /**
+   * Get the subversion revision.
+   * @return the revision number as a string (e.g. "451451")
+   */
+  String revision();
+}
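
For context, a minimal sketch of how a package-level VersionAnnotation can be read back at runtime, roughly what a consumer such as the VersionInfo class added in this commit would do. Hypothetical example, not part of this commit; it assumes the annotation has been generated onto the org.apache.hadoop.hbase package at build time.

    import org.apache.hadoop.hbase.VersionAnnotation;

    public class VersionExample {
      public static void main(String[] args) {
        // The annotation lives on the package, not on a class.
        Package pkg = VersionAnnotation.class.getPackage();
        VersionAnnotation va =
            pkg == null ? null : pkg.getAnnotation(VersionAnnotation.class);
        if (va != null) {
          System.out.println("version=" + va.version()
              + " revision=" + va.revision());
        } else {
          System.out.println("package not annotated");
        }
      }
    }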

Added: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java?rev=1344510&view=auto
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java (added)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java Wed May 30 23:51:44 2012
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.IOException;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
+
+/**
+ * A byte sequence that is usable as a key or value.  Based on
+ * {@link org.apache.hadoop.io.BytesWritable} only this class is NOT resizable
+ * and DOES NOT distinguish between the size of the seqeunce and the current
+ * capacity as {@link org.apache.hadoop.io.BytesWritable} does. Hence its
+ * comparatively 'immutable'. When creating a new instance of this class,
+ * the underlying byte [] is not copied, just referenced.  The backing
+ * buffer is accessed when we go to serialize.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ImmutableBytesWritable
+implements WritableComparable<ImmutableBytesWritable> {
+  private byte[] bytes;
+  private int offset;
+  private int length;
+
+  /**
+   * Create a zero-size sequence.
+   */
+  public ImmutableBytesWritable() {
+    super();
+  }
+
+  /**
+   * Create an ImmutableBytesWritable using the byte array as the initial value.
+   * @param bytes This array becomes the backing storage for the object.
+   */
+  public ImmutableBytesWritable(byte[] bytes) {
+    this(bytes, 0, bytes.length);
+  }
+
+  /**
+   * Create an ImmutableBytesWritable with the contents of the passed
+   * <code>ibw</code>.
+   * @param ibw the value to initialize this ImmutableBytesWritable with.
+   */
+  public ImmutableBytesWritable(final ImmutableBytesWritable ibw) {
+    this(ibw.get(), 0, ibw.getSize());
+  }
+
+  /**
+   * Create an ImmutableBytesWritable over a given byte range.
+   * @param bytes the byte range to wrap
+   * @param offset the offset in <code>bytes</code> to start at
+   * @param length the number of bytes in the range
+   */
+  public ImmutableBytesWritable(final byte[] bytes, final int offset,
+      final int length) {
+    this.bytes = bytes;
+    this.offset = offset;
+    this.length = length;
+  }
+
+  /**
+   * Get the data from the BytesWritable.
+   * @return the data; only valid between offset and offset+length.
+   */
+  public byte [] get() {
+    if (this.bytes == null) {
+      throw new IllegalStateException("Uninitialized. Null constructor " +
+        "called w/o accompanying readFields invocation");
+    }
+    return this.bytes;
+  }
+
+  /**
+   * @param b Use passed bytes as backing array for this instance.
+   */
+  public void set(final byte [] b) {
+    set(b, 0, b.length);
+  }
+
+  /**
+   * @param b Use passed bytes as backing array for this instance.
+   * @param offset offset into the passed array at which the data starts
+   * @param length number of valid bytes in the passed array
+   */
+  public void set(final byte [] b, final int offset, final int length) {
+    this.bytes = b;
+    this.offset = offset;
+    this.length = length;
+  }
+
+  /**
+   * @return the number of valid bytes in the buffer
+   */
+  public int getSize() {
+    if (this.bytes == null) {
+      throw new IllegalStateException("Uninitialized. Null constructor " +
+        "called w/o accompanying readFields invocation");
+    }
+    return this.length;
+  }
+
+  /**
+   * @return the number of valid bytes in the buffer
+   */
+  //Should probably deprecate getSize() so that we keep the same calls for all
+  //byte []
+  public int getLength() {
+    if (this.bytes == null) {
+      throw new IllegalStateException("Uninitialized. Null constructor " +
+        "called w/o accompanying readFields invocation");
+    }
+    return this.length;
+  }
+
+  /**
+   * @return offset
+   */
+  public int getOffset(){
+    return this.offset;
+  }
+
+  public void readFields(final DataInput in) throws IOException {
+    this.length = in.readInt();
+    this.bytes = new byte[this.length];
+    in.readFully(this.bytes, 0, this.length);
+    this.offset = 0;
+  }
+
+  public void write(final DataOutput out) throws IOException {
+    out.writeInt(this.length);
+    out.write(this.bytes, this.offset, this.length);
+  }
+
+  // Below methods copied from BytesWritable
+  @Override
+  public int hashCode() {
+    int hash = 1;
+    for (int i = offset; i < offset + length; i++)
+      hash = (31 * hash) + (int)bytes[i];
+    return hash;
+  }
+
+  /**
+   * Define the sort order of the BytesWritable.
+   * @param that The other bytes writable
+   * @return Positive if left is bigger than right, 0 if they are equal, and
+   *         negative if left is smaller than right.
+   */
+  public int compareTo(ImmutableBytesWritable that) {
+    return WritableComparator.compareBytes(
+      this.bytes, this.offset, this.length,
+      that.bytes, that.offset, that.length);
+  }
+
+  /**
+   * Compares the bytes in this object to the specified byte array
+   * @param that
+   * @return Positive if left is bigger than right, 0 if they are equal, and
+   *         negative if left is smaller than right.
+   */
+  public int compareTo(final byte [] that) {
+    return WritableComparator.compareBytes(
+      this.bytes, this.offset, this.length,
+      that, 0, that.length);
+  }
+
+  /**
+   * @see java.lang.Object#equals(java.lang.Object)
+   */
+  @Override
+  public boolean equals(Object right_obj) {
+    if (right_obj instanceof byte []) {
+      return compareTo((byte [])right_obj) == 0;
+    }
+    if (right_obj instanceof ImmutableBytesWritable) {
+      return compareTo((ImmutableBytesWritable)right_obj) == 0;
+    }
+    return false;
+  }
+
+  /**
+   * @see java.lang.Object#toString()
+   */
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder(3*this.bytes.length);
+    for (int idx = offset; idx < offset + length; idx++) {
+      // if not the first, put a blank separator in
+      if (idx != offset) {
+        sb.append(' ');
+      }
+      // mask to avoid sign extension; a negative byte would otherwise
+      // render as an eight-character hex string
+      String num = Integer.toHexString(0xff & bytes[idx]);
+      // if it is only one digit, add a leading 0.
+      if (num.length() < 2) {
+        sb.append('0');
+      }
+      sb.append(num);
+    }
+    return sb.toString();
+  }
+
+  /** A Comparator optimized for ImmutableBytesWritable.
+   */
+  public static class Comparator extends WritableComparator {
+    private BytesWritable.Comparator comparator =
+      new BytesWritable.Comparator();
+
+    /** constructor */
+    public Comparator() {
+      super(ImmutableBytesWritable.class);
+    }
+
+    /**
+     * @see org.apache.hadoop.io.WritableComparator#compare(byte[], int, int, byte[], int, int)
+     */
+    @Override
+    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+      return comparator.compare(b1, s1, l1, b2, s2, l2);
+    }
+  }
+
+  static { // register this comparator
+    WritableComparator.define(ImmutableBytesWritable.class, new Comparator());
+  }
+
+  /**
+   * @param array List of byte [].
+   * @return Array of byte [].
+   */
+  public static byte [][] toArray(final List<byte []> array) {
+    // List#toArray doesn't work on lists of byte [].
+    byte[][] results = new byte[array.size()][];
+    for (int i = 0; i < array.size(); i++) {
+      results[i] = array.get(i);
+    }
+    return results;
+  }
+
+  /**
+   * Returns a copy of the bytes referred to by this writable
+   */
+  public byte[] copyBytes() {
+    return Arrays.copyOfRange(bytes, offset, offset+length);
+  }
+}
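
For context, a minimal sketch of the wrapping semantics described in the class comment above: the constructors reference the passed array rather than copying it, while copyBytes() materializes a detached copy. Hypothetical example, not part of this commit; it assumes the Bytes helper added in this commit provides the usual toBytes/toString conversions for Strings.

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IbwExample {
      public static void main(String[] args) {
        byte[] backing = Bytes.toBytes("row-0001");
        ImmutableBytesWritable whole = new ImmutableBytesWritable(backing);
        // A view over the first three bytes ("row"); no copy is made.
        ImmutableBytesWritable prefix = new ImmutableBytesWritable(backing, 0, 3);

        System.out.println(whole.compareTo(prefix)); // positive: whole is longer
        System.out.println(whole.equals(backing));   // true: compares contents
        byte[] copy = whole.copyBytes();             // detached copy of the range
        System.out.println(Bytes.toString(copy));
      }
    }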


