hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1485561 - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/resources/ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ hbase-server/src/test/java/org/apache/hadoop/hbase...
Date Thu, 23 May 2013 04:11:13 GMT
Author: stack
Date: Thu May 23 04:11:12 2013
New Revision: 1485561

URL: http://svn.apache.org/r1485561
Log:
HBASE-8450 Update hbase-default.xml and general recommendations to better suit current hw, h2, experience, etc.

Modified:
    hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hbase/trunk/hbase-common/src/main/resources/hbase-default.xml
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Thu May 23 04:11:12 2013
@@ -118,7 +118,7 @@ public class HColumnDescriptor implement
   /**
    * Default number of versions of a record to keep.
    */
-  public static final int DEFAULT_VERSIONS = 3;
+  public static final int DEFAULT_VERSIONS = 1;
 
   /**
    * Default is not to keep a minimum of versions.
@@ -151,7 +151,7 @@ public class HColumnDescriptor implement
    * is enabled.
    */
   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
-  
+
   /**
    * Default setting for whether to cache index blocks on write if block
    * caching is enabled.
@@ -166,7 +166,7 @@ public class HColumnDescriptor implement
   /**
    * Default setting for whether or not to use bloomfilters.
    */
-  public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString();
+  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
 
   /**
    * Default setting for whether to cache bloom filter blocks on write if block
@@ -543,7 +543,7 @@ public class HColumnDescriptor implement
     return Compression.Algorithm.valueOf(n.toUpperCase());
   }
 
-  /** @return compression type being used for the column family for major 
+  /** @return compression type being used for the column family for major
       compression */
   public Compression.Algorithm getCompactionCompression() {
     String n = getValue(COMPRESSION_COMPACT);

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Thu May 23 04:11:12 2013
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
@@ -1287,6 +1288,8 @@ public class HTableDescriptor implements
               .setInMemory(true)
               .setBlocksize(8 * 1024)
               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+              .setBloomFilterType(BloomType.NONE)
       });
 
   static {

Modified: hbase/trunk/hbase-common/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/resources/hbase-default.xml?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/resources/hbase-default.xml (original)
+++ hbase/trunk/hbase-common/src/main/resources/hbase-default.xml Thu May 23 04:11:12 2013
@@ -19,26 +19,52 @@
  * limitations under the License.
  */
 -->
+
+<!--
+OVERVIEW
+
+The important configs are listed near the top.  You should change
+at least the setting for hbase.tmp.dir.  Other settings will change
+depending on whether you are running hbase in standalone mode or
+distributed.  See the hbase reference guide for requirements and
+guidance on making configuration changes.
+
+This file does not contain all possible configurations.  The file would be
+much larger if it carried everything. The absent configurations will only be
+found through source code reading.  The idea is that such configurations are
+exotic and only those who would go to the trouble of reading a particular
+section in the code would be knowledgeable or invested enough in ever wanting
+to alter such configurations, so we do not list them here.  Listing all
+possible configurations would overwhelm and obscure the important.
+-->
+
 <configuration>
+  <!--Configs you will likely change are listed here at the top of the file.
+  -->
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>${java.io.tmpdir}/hbase-${user.name}</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp', the usual resolve for java.io.tmpdir, as the
+    '/tmp' directory is cleared on machine restart.
+    </description>
+  </property>
   <property>
     <name>hbase.rootdir</name>
-    <value>file:///tmp/hbase-${user.name}/hbase</value>
+    <value>file://${hbase.tmp.dir}/hbase</value>
     <description>The directory shared by region servers and into
     which HBase persists.  The URL should be 'fully-qualified'
     to include the filesystem scheme.  For example, to specify the
     HDFS directory '/hbase' where the HDFS instance's namenode is
     running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
+    hdfs://namenode.example.org:9000/hbase.  By default, we write
+    to whatever ${hbase.tmp.dir} is set to -- usually /tmp --
+    so change this configuration or else all data will be lost on
+    machine restart.
     </description>
   </property>
   <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property>
     <name>hbase.cluster.distributed</name>
     <value>false</value>
     <description>The mode the cluster will be in. Possible values are
@@ -48,14 +74,20 @@
     </description>
   </property>
   <property>
-    <name>hbase.tmp.dir</name>
-    <value>${java.io.tmpdir}/hbase-${user.name}</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which hbase will start/stop ZooKeeper on as
+    part of cluster start/stop.
     </description>
   </property>
+  <!--The above are the important configurations for getting hbase up
+    and running -->
+
   <property>
     <name>hbase.local.dir</name>
     <value>${hbase.tmp.dir}/local/</value>
@@ -63,6 +95,13 @@
     as a local storage.
     </description>
   </property>
+
+  <!--Master configurations-->
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <description>The port the HBase Master should bind to.</description>
+  </property>
   <property>
     <name>hbase.master.info.port</name>
     <value>60010</value>
@@ -77,101 +116,91 @@
     </description>
   </property>
   <property>
-    <name>hbase.client.write.buffer</name>
-    <value>2097152</value>
-    <description>Default size of the HTable clien write buffer in bytes.
-    A bigger buffer takes more memory -- on both the client and server
-    side since server instantiates the passed write buffer to process
-    it -- but a larger buffer size reduces the number of RPCs made.
-    For an estimate of server-side memory-used, evaluate
-    hbase.client.write.buffer * hbase.regionserver.handler.count
+    <name>hbase.master.logcleaner.plugins</name>
+    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
+    <description>A comma-separated list of LogCleanerDelegate invoked by
+    the LogsCleaner service. These WAL/HLog cleaners are called in order,
+    so put the HLog cleaner that prunes the most HLog files in front. To
+    implement your own LogCleanerDelegate, just put it in HBase's classpath
+    and add the fully qualified class name here. Always add the above
+    default log cleaners in the list.
     </description>
   </property>
   <property>
-    <name>hbase.regionserver.port</name>
-    <value>60020</value>
-    <description>The port the HBase RegionServer binds to.
+    <name>hbase.master.logcleaner.ttl</name>
+    <value>600000</value>
+    <description>Maximum time a HLog can stay in the .oldlogdir directory,
+    after which it will be cleaned by a Master thread.
     </description>
   </property>
   <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI
-    Set to -1 if you do not want the RegionServer UI to run.
+    <name>hbase.master.hfilecleaner.plugins</name>
+    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
+    <description>A comma-separated list of HFileCleanerDelegate invoked by
+    the HFileCleaner service. These HFiles cleaners are called in order,
+    so put the cleaner that prunes the most files in front. To
+    implement your own HFileCleanerDelegate, just put it in HBase's classpath
+    and add the fully qualified class name here. Always add the above
+    default log cleaners in the list as they will be overwritten in hbase-site.xml.
     </description>
   </property>
   <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>false</value>
-    <description>Whether or not the Master or RegionServer
-    UI should search for a port to bind to. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Useful for testing, turned off by default.
+    <name>hbase.master.catalog.timeout</name>
+    <value>600000</value>
+    <description>Timeout value for the Catalog Janitor from the master to META.
     </description>
   </property>
   <property>
-    <name>hbase.regionserver.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The address for the HBase RegionServer web UI
+    <name>hbase.master.dns.interface</name>
+    <value>default</value>
+    <description>The name of the Network Interface from which a master
+      should report its IP address.
     </description>
   </property>
   <property>
-    <name>hbase.client.pause</name>
-    <value>1000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.client.retries.number</name>
-    <value>10</value>
-    <description>Maximum retries.  Used as maximum for all retryable
-    operations such as fetching of the root region from root region
-    server, getting a cell's value, starting a row update, etc.
-    Default: 10.
+    <name>hbase.master.dns.nameserver</name>
+    <value>default</value>
+    <description>The host name or IP address of the name server (DNS)
+      which a master should use to determine the host name used
+      for communication and display purposes.
     </description>
   </property>
+
+  <!--RegionServer configurations-->
   <property>
-    <name>hbase.bulkload.retries.number</name>
-    <value>0</value>
-    <description>Maximum retries.  This is maximum number of iterations
-    to atomic bulk loads are attempted in the face of splitting operations
-    0 means never give up.  Default: 0.
+    <name>hbase.regionserver.port</name>
+    <value>60020</value>
+    <description>The port the HBase RegionServer binds to.
     </description>
   </property>
   <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.client.scanner.timeout.period
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI
+    Set to -1 if you do not want the RegionServer UI to run.
     </description>
   </property>
   <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since they cannot be split it helps avoiding that a region
-    cannot be split any further because the data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
+    <name>hbase.regionserver.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The address for the HBase RegionServer web UI
     </description>
   </property>
   <property>
-    <name>hbase.client.scanner.timeout.period</name>
-    <value>60000</value>
-    <description>Client scanner lease period in milliseconds. Default is
-    60 seconds. </description>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>false</value>
+    <description>Whether or not the Master or RegionServer
+    UI should search for a port to bind to. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Useful for testing, turned off by default.
+    </description>
   </property>
   <property>
     <name>hbase.regionserver.handler.count</name>
-    <value>10</value>
+    <value>30</value>
     <description>Count of RPC Listener instances spun up on RegionServers.
     Same property is used by the Master for count of master handlers.
-    Default is 10.
+    Default is 30.
     </description>
   </property>
   <property>
@@ -224,28 +253,38 @@
     <description>The HLog file writer implementation.</description>
   </property>
   <property>
-    <name>hbase.regionserver.nbreservationblocks</name>
-    <value>4</value>
-    <description>The number of resevoir blocks of memory release on
-    OOME so we can cleanup properly before server shutdown.
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap.
+      Updates are blocked and flushes are forced until size of all memstores
+      in a region server hits hbase.regionserver.global.memstore.lowerLimit.
     </description>
   </property>
   <property>
-    <name>hbase.zookeeper.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a ZooKeeper server
-      should report its IP address.
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>Maximum size of all memstores in a region server before
+      flushes are forced. Defaults to 38% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
     </description>
   </property>
   <property>
-    <name>hbase.zookeeper.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a ZooKeeper server should use to determine the host name used by the
-      master for communication and display purposes.
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>3600000</value>
+    <description>
+    Maximum amount of time an edit lives in memory before being automatically flushed.
+    Default 1 hour. Set it to 0 to disable automatic flushing.
     </description>
   </property>
   <property>
+    <name>hbase.regionserver.catalog.timeout</name>
+    <value>600000</value>
+    <description>Timeout value for the Catalog Janitor from the regionserver to META.</description>
+  </property>
+  <property>
     <name>hbase.regionserver.dns.interface</name>
     <value>default</value>
     <description>The name of the Network Interface from which a region server
@@ -260,70 +299,226 @@
       master for communication and display purposes.
     </description>
   </property>
+
+  <!--ZooKeeper configuration-->
   <property>
-    <name>hbase.master.dns.interface</name>
+    <name>zookeeper.session.timeout</name>
+    <value>90000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file path are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.rootserver</name>
+    <value>root-region-server</value>
+    <description>Path to ZNode holding root region location. This is written by
+      the master and read by clients and region servers. If a relative path is
+      given, the parent folder will be ${zookeeper.znode.parent}. By default,
+      this means the root location is stored at /hbase/root-region-server.
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.acl.parent</name>
+    <value>acl</value>
+    <description>Root ZNode for access control lists.</description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.dns.interface</name>
     <value>default</value>
-    <description>The name of the Network Interface from which a master
+    <description>The name of the Network Interface from which a ZooKeeper server
       should report its IP address.
     </description>
   </property>
   <property>
-    <name>hbase.master.dns.nameserver</name>
+    <name>hbase.zookeeper.dns.nameserver</name>
     <value>default</value>
     <description>The host name or IP address of the name server (DNS)
-      which a master should use to determine the host name used
-      for communication and display purposes.
+      which a ZooKeeper server should use to determine the host name used by the
+      master for communication and display purposes.
     </description>
   </property>
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
   <property>
-    <name>hbase.balancer.period
-    </name>
-    <value>300000</value>
-    <description>Period at which the region balancer runs in the Master.
+    <name>hbase.zookeeper.peerport</name>
+    <value>2888</value>
+    <description>Port used by ZooKeeper peers to talk to each other.
+    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+    for more information.
     </description>
   </property>
   <property>
-    <name>hbase.regions.slop</name>
-    <value>0.2</value>
-    <description>Rebalance if any regionserver has average + (average * slop) regions.
-    Default is 20% slop.
+    <name>hbase.zookeeper.leaderport</name>
+    <value>3888</value>
+    <description>Port used by ZooKeeper for leader election.
+    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+    for more information.
     </description>
   </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
   <property>
-    <name>hbase.master.logcleaner.ttl</name>
-    <value>600000</value>
-    <description>Maximum time a HLog can stay in the .oldlogdir directory,
-    after which it will be cleaned by a Master thread.
+    <name>hbase.zookeeper.useMulti</name>
+    <value>false</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and
+    will not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
     </description>
   </property>
   <property>
-    <name>hbase.master.logcleaner.plugins</name>
-    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
-    <description>A comma-separated list of LogCleanerDelegate invoked by
-    the LogsCleaner service. These WAL/HLog cleaners are called in order,
-    so put the HLog cleaner that prunes the most HLog files in front. To
-    implement your own LogCleanerDelegate, just put it in HBase's classpath
-    and add the fully qualified class name here. Always add the above
-    default log cleaners in the list.
+    <name>hbase.config.read.zookeeper.config</name>
+    <value>false</value>
+    <description>
+        Set to true to allow HBaseConfiguration to read the
+        zoo.cfg file for ZooKeeper properties. Switching this to true
+        is not recommended, since the functionality of reading ZK
+        properties from a zoo.cfg file has been deprecated.
     </description>
   </property>
+  <!--
+  Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
+  All properties with an "hbase.zookeeper.property." prefix are converted for
+  ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
+  e.g.  "initLimit=10" you would append the following to your configuration:
+    <property>
+      <name>hbase.zookeeper.property.initLimit</name>
+      <value>10</value>
+    </property>
+  -->
   <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap.
-      Updates are blocked and flushes are forced until size of all memstores
-      in a region server hits hbase.regionserver.global.memstore.lowerLimit.
+    <name>hbase.zookeeper.property.initLimit</name>
+    <value>10</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The number of ticks that the initial synchronization phase can take.
     </description>
   </property>
   <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.35</value>
-    <description>Maximum size of all memstores in a region server before
-      flushes are forced. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
+    <name>hbase.zookeeper.property.syncLimit</name>
+    <value>5</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The number of ticks that can pass between sending a request and getting an
+    acknowledgment.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>${hbase.tmp.dir}/zookeeper</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The directory where the snapshot is stored.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.maxClientCnxns</name>
+    <value>300</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    Limit on number of concurrent connections (at the socket level) that a
+    single client, identified by IP address, may make to a single member of
+    the ZooKeeper ensemble. Set high to avoid zk connection issues running
+    standalone and pseudo-distributed.
+    </description>
+  </property>
+  <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
+
+  <!--Client configurations-->
+  <property>
+    <name>hbase.client.write.buffer</name>
+    <value>2097152</value>
+    <description>Default size of the HTable client write buffer in bytes.
+    A bigger buffer takes more memory -- on both the client and server
+    side since server instantiates the passed write buffer to process
+    it -- but a larger buffer size reduces the number of RPCs made.
+    For an estimate of server-side memory-used, evaluate
+    hbase.client.write.buffer * hbase.regionserver.handler.count
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>100</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>20</value>
+    <description>Maximum retries.  Used as maximum for all retryable
+    operations such as the getting of a cell's value, starting a row update, etc.
+    Default: 20.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.client.scanner.timeout.period
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since they cannot be split it helps avoiding that a region
+    cannot be split any further because the data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.timeout.period</name>
+    <value>60000</value>
+    <description>Client scanner lease period in milliseconds. Default is
+    60 seconds. </description>
+  </property>
+
+  <!--Miscellaneous configuration-->
+  <property>
+    <name>hbase.bulkload.retries.number</name>
+    <value>0</value>
+    <description>Maximum retries.  This is the maximum number of iterations
+    that atomic bulk loads are attempted in the face of splitting operations.
+    0 means never give up.  Default: 0.
+    </description>
+  </property>
+  <property>
+    <name>hbase.balancer.period
+    </name>
+    <value>300000</value>
+    <description>Period at which the region balancer runs in the Master.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regions.slop</name>
+    <value>0.2</value>
+    <description>Rebalance if any regionserver has average + (average * slop) regions.
+    Default is 20% slop.
     </description>
   </property>
   <property>
@@ -343,14 +538,6 @@
     </description>
   </property>
   <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>3600000</value>
-    <description>
-    Maximum amount of time an edit lives in memory before being automatically flushed.
-    Default 1 hour. Set it to 0 to disable automatic flushing.
-    </description>
-  </property>
-  <property>
     <name>hbase.hregion.memstore.flush.size</name>
     <value>134217728</value>
     <description>
@@ -406,6 +593,29 @@
     </description>
   </property>
   <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: Set to 7 days.  Major compactions tend to
+    happen exactly when you need them least so enable them such that they run at
+    off-peak for your deploy; or, since this setting is on a periodicity that is
+    unlikely to match your loading, run the compactions via an external
+    invocation out of a cron job or some such.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction.jitter</name>
+    <value>0.50</value>
+    <description>Jitter outer bound for major compactions.
+    On each regionserver, we multiply the hbase.hregion.majorcompaction
+    interval by some random fraction that is inside the bounds of this
+    maximum.  We then add this + or - product to when the next
+    major compaction is to run.  The idea is that major compaction
+    does not happen on every regionserver at exactly the same time.  The
+    smaller this number, the closer the compactions come together.
+    </description>
+  </property>
+  <property>
     <name>hbase.hstore.compactionThreshold</name>
     <value>3</value>
     <description>
@@ -417,7 +627,7 @@
   </property>
   <property>
     <name>hbase.hstore.blockingStoreFiles</name>
-    <value>7</value>
+    <value>10</value>
     <description>
     If more than this number of StoreFiles in any one Store
     (one StoreFile is written per flush of MemStore) then updates are
@@ -442,14 +652,6 @@
     </description>
   </property>
   <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in miliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
     <name>hbase.storescanner.parallel.seek.enable</name>
     <value>false</value>
     <description>
@@ -479,19 +681,12 @@
   </property>
   <property>
     <name>hfile.block.cache.size</name>
-    <value>0.25</value>
+    <value>0.4</value>
     <description>
         Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hash.type</name>
-    <value>murmur</value>
-    <description>The hashing algorithm for use in HashFunction. Two values are
-    supported now: murmur (MurmurHash) and jenkins (JenkinsHash).
-    Used by bloom filters.
+        used by HFile/StoreFile. Default of 0.4 means allocate 40%.
+        Set to 0 to disable but it's not recommended; you need at least
+        enough cache to hold the storefile indices.
     </description>
   </property>
   <property>
@@ -521,20 +716,20 @@
       </description>
   </property>
   <property>
-      <name>io.storefile.bloom.block.size</name>
-      <value>131072</value>
+      <name>hfile.block.bloom.cacheonwrite</name>
+      <value>false</value>
       <description>
-          The size in bytes of a single block ("chunk") of a compound Bloom
-          filter. This size is approximate, because Bloom blocks can only be
-          inserted at data block boundaries, and the number of keys per data
-          block varies.
+          Enables cache-on-write for inline blocks of a compound Bloom filter.
       </description>
   </property>
   <property>
-      <name>hfile.block.bloom.cacheonwrite</name>
-      <value>false</value>
+      <name>io.storefile.bloom.block.size</name>
+      <value>131072</value>
       <description>
-          Enables cache-on-write for inline blocks of a compound Bloom filter.
+          The size in bytes of a single block ("chunk") of a compound Bloom
+          filter. This size is approximate, because Bloom blocks can only be
+          inserted at data block boundaries, and the number of keys per data
+          block varies.
       </description>
   </property>
   <property>
@@ -545,7 +740,6 @@
           block is finished.
       </description>
   </property>
-
   <property>
     <name>hbase.rpc.server.engine</name>
     <value>org.apache.hadoop.hbase.ipc.ProtobufRpcServerEngine</value>
@@ -554,13 +748,22 @@
     </description>
   </property>
   <property>
+    <name>hbase.rpc.timeout</name>
+    <value>60000</value>
+    <description>
+        This is for the RPC layer to define how long HBase client applications
+        take for a remote call to time out. It uses pings to check connections
+        but will eventually throw a TimeoutException.
+        The default value is 60000ms(60s).
+    </description>
+  </property>
+  <property>
     <name>hbase.ipc.client.tcpnodelay</name>
     <value>true</value>
     <description>Set no delay on rpc socket connections.  See
     http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay()
     </description>
   </property>
-
   <!-- The following properties configure authentication information for
        HBase processes when using Kerberos security.  There are no default
        values, included here for documentation purposes -->
@@ -599,7 +802,6 @@
     specified in hbase.regionserver.keytab.file
     </description>
   </property>
-
   <!-- Additional configuration specific to HBase security -->
   <property>
     <name>hadoop.policy.file</name>
@@ -631,43 +833,6 @@
     authentication token expires.  Only used when HBase security is enabled.
     </description>
   </property>
-
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>180000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.rootserver</name>
-    <value>root-region-server</value>
-    <description>Path to ZNode holding root region location. This is written by
-      the master and read by clients and region servers. If a relative path is
-      given, the parent folder will be ${zookeeper.znode.parent}. By default,
-      this means the root location is stored at /hbase/root-region-server.
-    </description>
-  </property>
-
-  <property>
-    <name>zookeeper.znode.acl.parent</name>
-    <value>acl</value>
-    <description>Root ZNode for access control lists.</description>
-  </property>
-
   <property>
     <name>hbase.coprocessor.region.classes</name>
     <value></value>
@@ -678,114 +843,6 @@
     A coprocessor can also be loaded on demand by setting HTableDescriptor.
     </description>
   </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-    org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-    loaded by default on the active HMaster process. For any implemented
-    coprocessor methods, the listed classes will be called in order. After
-    implementing your own MasterObserver, just put it in HBase's classpath
-    and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.peerport</name>
-    <value>2888</value>
-    <description>Port used by ZooKeeper peers to talk to each other.
-    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
-    for more information.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.leaderport</name>
-    <value>3888</value>
-    <description>Port used by ZooKeeper for leader election.
-    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
-    for more information.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>false</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <!--
-  Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
-  All properties with an "hbase.zookeeper.property." prefix are converted for
-  ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
-  e.g.  "initLimit=10" you would append the following to your configuration:
-    <property>
-      <name>hbase.zookeeper.property.initLimit</name>
-      <value>10</value>
-    </property>
-  -->
-  <property>
-    <name>hbase.zookeeper.property.initLimit</name>
-    <value>10</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The number of ticks that the initial synchronization phase can take.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.syncLimit</name>
-    <value>5</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The number of ticks that can pass between sending a request and getting an
-    acknowledgment.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.dataDir</name>
-    <value>${hbase.tmp.dir}/zookeeper</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The directory where the snapshot is stored.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.maxClientCnxns</name>
-    <value>300</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    Limit on number of concurrent connections (at the socket level) that a
-    single client, identified by IP address, may make to a single member of
-    the ZooKeeper ensemble. Set high to avoid zk connection issues running
-    standalone and pseudo-distributed.
-    </description>
-  </property>
-  <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
   <property>
     <name>hbase.rest.port</name>
     <value>8080</value>
@@ -800,7 +857,29 @@
     true: Only the GET method is permitted.
     </description>
   </property>
-
+  <property>
+    <name>hbase.rest.threads.max</name>
+    <value>100</value>
+    <description>
+        The maximum number of threads of the REST server thread pool.
+        Threads in the pool are reused to process REST requests. This
+        controls the maximum number of requests processed concurrently.
+        It may help to control the memory used by the REST server to
+        avoid OOM issues. If the thread pool is full, incoming requests
+        will be queued up and wait for some free threads. The default
+        is 100.
+    </description>
+  </property>
+  <property>
+    <name>hbase.rest.threads.min</name>
+    <value>2</value>
+    <description>
+        The minimum number of threads of the REST server thread pool.
+        The thread pool always has at least this number of threads so
+        the REST server is ready to serve incoming requests. The default
+        is 2.
+    </description>
+  </property>
   <property skipInDoc="true">
     <name>hbase.defaults.for.version</name>
     <value>@@@VERSION@@@</value>
@@ -824,6 +903,17 @@
     </description>
   </property>
   <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+    org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+    loaded by default on the active HMaster process. For any implemented
+    coprocessor methods, the listed classes will be called in order. After
+    implementing your own MasterObserver, just put it in HBase's classpath
+    and add the fully qualified class name here.
+    </description>
+  </property>
+  <property>
       <name>hbase.coprocessor.abortonerror</name>
       <value>false</value>
       <description>
@@ -854,15 +944,6 @@
     </description>
   </property>
   <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-    This is an hdfs config. set in here so the hdfs client will do append support.
-    You must ensure that this config. is true serverside too when running hbase
-    (You will have to restart your cluster after setting it).
-    </description>
-  </property>
-  <property>
     <name>hbase.thrift.minWorkerThreads</name>
     <value>16</value>
     <description>
@@ -930,37 +1011,6 @@
 	</description>
   </property>
   <property>
-    <name>hbase.master.hfilecleaner.plugins</name>
-    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
-    <description>A comma-separated list of HFileCleanerDelegate invoked by
-    the HFileCleaner service. These HFiles cleaners are called in order,
-    so put the cleaner that prunes the most files in front. To
-    implement your own HFileCleanerDelegate, just put it in HBase's classpath
-    and add the fully qualified class name here. Always add the above
-    default log cleaners in the list as they will be overwritten in hbase-site.xml.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.catalog.timeout</name>
-    <value>600000</value>
-    <description>Timeout value for the Catalog Janitor from the regionserver to META.</description>
-  </property>
-  <property>
-    <name>hbase.master.catalog.timeout</name>
-    <value>600000</value>
-    <description>Timeout value for the Catalog Janitor from the master to META.</description>
-  </property>
-  <property>
-    <name>hbase.config.read.zookeeper.config</name>
-    <value>false</value>
-    <description>
-        Set to true to allow HBaseConfiguration to read the
-        zoo.cfg file for ZooKeeper properties. Switching this to true
-        is not recommended, since the functionality of reading ZK
-        properties from a zoo.cfg file has been deprecated.
-    </description>
-  </property>
-  <property>
     <name>hbase.snapshot.enabled</name>
     <value>true</value>
     <description>
@@ -968,39 +1018,6 @@
     </description>
   </property>
   <property>
-    <name>hbase.rest.threads.max</name>
-    <value>100</value>
-    <description>
-        The maximum number of threads of the REST server thread pool.
-        Threads in the pool are reused to process REST requests. This
-        controls the maximum number of requests processed concurrently.
-        It may help to control the memory used by the REST server to
-        avoid OOM issues. If the thread pool is full, incoming requests
-        will be queued up and wait for some free threads. The default
-        is 100.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rest.threads.min</name>
-    <value>2</value>
-    <description>
-        The minimum number of threads of the REST server thread pool.
-        The thread pool always has at least these number of threads so
-        the REST server is ready to serve incoming requests. The default
-        is 2.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rpc.timeout</name>
-    <value>60000</value>
-    <description>
-        This is for the RPC layer to define how long HBase client applications
-        take for a remote call to time out. It uses pings to check connections
-        but will eventually throw a TimeoutException. 
-        The default value is 60000ms(60s).
-    </description>
-  </property>
-  <property>
     <name>hbase.server.compactchecker.interval.multiplier</name>
     <value>1000</value>
     <description>

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java Thu May 23 04:11:12 2013
@@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 
 /**
@@ -79,8 +78,9 @@ public class CompactionConfiguration {
     throttlePoint =  conf.getLong("hbase.regionserver.thread.compaction.throttle",
           2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize());
     shouldDeleteExpired = conf.getBoolean("hbase.store.delete.expired.storefile", true);
-    majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
-    majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
+    majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24*7);
+    // Make it 0.5 so jitter has us fall evenly either side of when the compaction should run
+    majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);
 
     LOG.info("Compaction configuration " + this.toString());
   }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Thu May 23 04:11:12 2013
@@ -1064,7 +1064,12 @@ public class HBaseTestingUtility extends
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for(byte[] family : families) {
-      desc.addFamily(new HColumnDescriptor(family));
+      HColumnDescriptor hcd = new HColumnDescriptor(family);
+      // Disable blooms (they are on by default as of 0.95) but we disable them here because
+      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
+      // on is interfering.
+      hcd.setBloomFilterType(BloomType.NONE);
+      desc.addFamily(hcd);
     }
     getHBaseAdmin().createTable(desc);
     return new HTable(c, tableName);
@@ -1118,8 +1123,7 @@ public class HBaseTestingUtility extends
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family)
-          .setMaxVersions(numVersions);
+      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
       desc.addFamily(hcd);
     }
     getHBaseAdmin().createTable(desc);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java Thu May 23 04:11:12 2013
@@ -95,7 +95,9 @@ public class TestMultiVersions {
   @Test
   public void testTimestamps() throws Exception {
     HTableDescriptor desc = new HTableDescriptor("testTimestamps");
-    desc.addFamily(new HColumnDescriptor(TimestampTestBase.FAMILY_NAME));
+    HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME);
+    hcd.setMaxVersions(3);
+    desc.addFamily(hcd);
     this.admin.createTable(desc);
     HTable table = new HTable(UTIL.getConfiguration(), desc.getName());
     // TODO: Remove these deprecated classes or pull them in here if this is
@@ -134,7 +136,9 @@ public class TestMultiVersions {
     final long timestamp1 = 100L;
     final long timestamp2 = 200L;
     final HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(contents));
+    HColumnDescriptor hcd = new HColumnDescriptor(contents);
+    hcd.setMaxVersions(3);
+    desc.addFamily(hcd);
     this.admin.createTable(desc);
     Put put = new Put(row, timestamp1);
     put.add(contents, contents, value1);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Thu May 23 04:11:12 2013
@@ -171,7 +171,7 @@ public class TestFromClientSide {
      final byte[] T2 = Bytes.toBytes("T2");
      final byte[] T3 = Bytes.toBytes("T3");
      HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
-         .setKeepDeletedCells(true);
+         .setKeepDeletedCells(true).setMaxVersions(3);
 
      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
      desc.addFamily(hcd);
@@ -1730,7 +1730,7 @@ public class TestFromClientSide {
     byte [][] VALUES = makeN(VALUE, 5);
     long [] ts = {1000, 2000, 3000, 4000, 5000};
 
-    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, 3);
 
     Put put = new Put(ROW);
     put.add(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]);
@@ -4459,7 +4459,7 @@ public class TestFromClientSide {
     conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);
 
     final HTable table = TEST_UTIL.createTable(tableName,
-        new byte[][] { FAMILY }, conf);
+        new byte[][] { FAMILY }, conf, 3);
     table.setAutoFlush(true);
 
     final long ts = EnvironmentEdgeManager.currentTimeMillis();

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java Thu May 23 04:11:12 2013
@@ -47,7 +47,7 @@ public class TestColumnPrefixFilter {
   public void testColumnPrefixFilter() throws IOException {
     String family = "Family";
     HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
-    htd.addFamily(new HColumnDescriptor(family));
+    htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.
       getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
@@ -109,7 +109,7 @@ public class TestColumnPrefixFilter {
   public void testColumnPrefixFilterWithFilterList() throws IOException {
     String family = "Family";
     HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
-    htd.addFamily(new HColumnDescriptor(family));
+    htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.
       getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java Thu May 23 04:11:12 2013
@@ -71,8 +71,12 @@ public class TestDependentColumnFilter {
     testVals = makeTestVals();
 
     HTableDescriptor htd = new HTableDescriptor(this.getClass().getName());
-    htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
-    htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
+    HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]);
+    hcd0.setMaxVersions(3);
+    htd.addFamily(hcd0);
+    HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]);
+    hcd1.setMaxVersions(3);
+    htd.addFamily(hcd1);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
       TEST_UTIL.getConfiguration(), htd);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java Thu May 23 04:11:12 2013
@@ -47,7 +47,9 @@ public class TestMultipleColumnPrefixFil
   public void testMultipleColumnPrefixFilter() throws IOException {
     String family = "Family";
     HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter");
-    htd.addFamily(new HColumnDescriptor(family));
+    HColumnDescriptor hcd = new HColumnDescriptor(family);
+    hcd.setMaxVersions(3);
+    htd.addFamily(hcd);
     // HRegionInfo info = new HRegionInfo(htd, null, null, false);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.
@@ -109,8 +111,12 @@ public class TestMultipleColumnPrefixFil
     String family1 = "Family1";
     String family2 = "Family2";
     HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter");
-    htd.addFamily(new HColumnDescriptor(family1));
-    htd.addFamily(new HColumnDescriptor(family2));
+    HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
+    hcd1.setMaxVersions(3);
+    htd.addFamily(hcd1);
+    HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
+    hcd2.setMaxVersions(3);
+    htd.addFamily(hcd2);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.
       getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java Thu May 23 04:11:12 2013
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.client.Get;
@@ -34,9 +35,9 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.MultiThreadedWriter;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator;
 import org.junit.Test;
@@ -91,17 +92,16 @@ public class TestEncodedSeekers {
   @Test
   public void testEncodedSeeker() throws IOException {
     System.err.println("Testing encoded seekers for encoding " + encoding);
-    LruBlockCache cache = (LruBlockCache)
-    new CacheConfig(testUtil.getConfiguration()).getBlockCache();
+    LruBlockCache cache =
+      (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
     cache.clearCache();
-
-    HRegion region = testUtil.createTestRegion(
-        TABLE_NAME, new HColumnDescriptor(CF_NAME)
-            .setMaxVersions(MAX_VERSIONS)
-            .setDataBlockEncoding(encoding)
-            .setEncodeOnDisk(encodeOnDisk)
-            .setBlocksize(BLOCK_SIZE)
-    );
+    // Need to disable default row bloom filter for this test to pass.
+    HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
+        setDataBlockEncoding(encoding).
+        setEncodeOnDisk(encodeOnDisk).
+        setBlocksize(BLOCK_SIZE).
+        setBloomFilterType(BloomType.NONE);
+    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
 
     //write the data, but leave some in the memstore
     doPuts(region);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java Thu May 23 04:11:12 2013
@@ -133,7 +133,7 @@ public class TestImportExport {
   @Test
   public void testSimpleCase() throws Exception {
     String EXPORT_TABLE = "exportSimpleCase";
-    HTable t = UTIL.createTable(Bytes.toBytes(EXPORT_TABLE), FAMILYA);
+    HTable t = UTIL.createTable(Bytes.toBytes(EXPORT_TABLE), FAMILYA, 3);
     Put p = new Put(ROW1);
     p.add(FAMILYA, QUAL, now, QUAL);
     p.add(FAMILYA, QUAL, now+1, QUAL);
@@ -153,7 +153,7 @@ public class TestImportExport {
     assertTrue(runExport(args));
 
     String IMPORT_TABLE = "importTableSimpleCase";
-    t = UTIL.createTable(Bytes.toBytes(IMPORT_TABLE), FAMILYB);
+    t = UTIL.createTable(Bytes.toBytes(IMPORT_TABLE), FAMILYB, 3);
     args = new String[] {
         "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING,
         IMPORT_TABLE,

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java Thu May 23 04:11:12 2013
@@ -56,6 +56,7 @@ public class TestColumnSeeking {
 
     HColumnDescriptor hcd =
         new HColumnDescriptor(familyBytes).setMaxVersions(1000);
+    hcd.setMaxVersions(3);
     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
@@ -168,7 +169,9 @@ public class TestColumnSeeking {
     String table = "TestSingleVersions";
 
     HTableDescriptor htd = new HTableDescriptor(table);
-    htd.addFamily(new HColumnDescriptor(family));
+    HColumnDescriptor hcd = new HColumnDescriptor(family);
+    hcd.setMaxVersions(3);
+    htd.addFamily(hcd);
 
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
     HRegion region =

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Thu May 23 04:11:12 2013
@@ -4014,7 +4014,10 @@ public class TestHRegion extends HBaseTe
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.setReadOnly(isReadOnly);
     for(byte [] family : families) {
-      htd.addFamily(new HColumnDescriptor(family));
+      HColumnDescriptor hcd = new HColumnDescriptor(family);
+      // Keep all versions (Integer.MAX_VALUE) so tests that write more than the
+      // new default of three versions still see every cell.
+      hcd.setMaxVersions(Integer.MAX_VALUE);
+      htd.addFamily(hcd);
     }
     HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false);
     Path path = new Path(DIR + callingMethod);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java Thu May 23 04:11:12 2013
@@ -35,6 +35,8 @@ import org.junit.experimental.categories
 @Category(MediumTests.class)
 @SuppressWarnings("deprecation")
 public class TestHRegionBusyWait extends TestHRegion {
+  // TODO: This subclass runs all the tests in TestHRegion as well as the test below which means
+  // all TestHRegion tests are run twice.
   public TestHRegionBusyWait() {
     conf.set("hbase.busy.wait.duration", "1000");
   }
@@ -87,4 +89,4 @@ public class TestHRegionBusyWait extends
       region = null;
     }
   }
-}
+}
\ No newline at end of file

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java Thu May 23 04:11:12 2013
@@ -143,6 +143,7 @@ public class TestSeekOptimizations {
         new HColumnDescriptor(FAMILY)
             .setCompressionType(comprAlgo)
             .setBloomFilterType(bloomType)
+            .setMaxVersions(3)
     );
 
     // Delete the given timestamp and everything before.

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java Thu May 23 04:11:12 2013
@@ -120,6 +120,7 @@ public class TestReplicationBase {
 
     HTableDescriptor table = new HTableDescriptor(tableName);
     HColumnDescriptor fam = new HColumnDescriptor(famName);
+    fam.setMaxVersions(3);
     fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
     table.addFamily(fam);
     fam = new HColumnDescriptor(noRepfamName);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java Thu May 23 04:11:12 2013
@@ -78,9 +78,9 @@ public class TestRemoteTable {
     HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
     if (!admin.tableExists(TABLE)) {
       HTableDescriptor htd = new HTableDescriptor(TABLE);
-      htd.addFamily(new HColumnDescriptor(COLUMN_1));
-      htd.addFamily(new HColumnDescriptor(COLUMN_2));
-      htd.addFamily(new HColumnDescriptor(COLUMN_3));
+      htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
+      htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
+      htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
       admin.createTable(htd);
       HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
       Put put = new Put(ROW_1);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java?rev=1485561&r1=1485560&r2=1485561&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java Thu May 23 04:11:12 2013
@@ -76,9 +76,8 @@ public class TestThriftHBaseServiceHandl
   private static byte[] valueAname = Bytes.toBytes("valueA");
   private static byte[] valueBname = Bytes.toBytes("valueB");
   private static HColumnDescriptor[] families = new HColumnDescriptor[] {
-      new HColumnDescriptor(familyAname),
-      new HColumnDescriptor(familyBname)
-          .setMaxVersions(2)
+      new HColumnDescriptor(familyAname).setMaxVersions(3),
+      new HColumnDescriptor(familyBname).setMaxVersions(2)
   };
 
 



Mime
View raw message