hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1583819 - in /hbase/trunk: hbase-common/src/main/resources/hbase-default.xml hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
Date Tue, 01 Apr 2014 23:00:04 GMT
Author: stack
Date: Tue Apr  1 23:00:03 2014
New Revision: 1583819

URL: http://svn.apache.org/r1583819
Log:
HBASE-10855 Enable hfilev3 by default

Modified:
    hbase/trunk/hbase-common/src/main/resources/hbase-default.xml
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java

Modified: hbase/trunk/hbase-common/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/resources/hbase-default.xml?rev=1583819&r1=1583818&r2=1583819&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/resources/hbase-default.xml (original)
+++ hbase/trunk/hbase-common/src/main/resources/hbase-default.xml Tue Apr  1 23:00:03 2014
@@ -666,10 +666,11 @@ possible configurations would overwhelm 
   </property>
   <property>
       <name>hfile.format.version</name>
-      <value>2</value>
-      <description>The HFile format version to use for new files. Set this to 1 to test
-          backwards-compatibility. The default value of this option should be
-          consistent with FixedFileTrailer.MAX_VERSION.</description>
+      <value>3</value>
+      <description>The HFile format version to use for new files.
+      Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags).
+      Distributed Log Replay requires that tags are enabled.
+      </description>
   </property>
   <property>
       <name>hfile.block.bloom.cacheonwrite</name>

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java?rev=1583819&r1=1583818&r2=1583819&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java Tue Apr  1 23:00:03 2014
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.
 
 
 /**
- * Data structure to describe the distribution of HDFS blocks amount hosts.
+ * Data structure to describe the distribution of HDFS blocks among hosts.
  *
  * Adding erroneous data will be ignored silently.
  */

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1583819&r1=1583818&r2=1583819&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Apr  1 23:00:03 2014
@@ -3663,8 +3663,13 @@ public class TestHRegion {
   @Test
   public void testgetHDFSBlocksDistribution() throws Exception {
     HBaseTestingUtility htu = new HBaseTestingUtility();
-    final int DEFAULT_BLOCK_SIZE = 1024;
-    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
+    // break up the file in to more pieces that can be distributed across the three nodes and we
+    // won't be able to have the condition this test asserts; that at least one node has
+    // a copy of all replicas -- if small block size, then blocks are spread evenly across the
+    // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
+    // final int DEFAULT_BLOCK_SIZE = 1024;
+    // htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     htu.getConfiguration().setInt("dfs.replication", 2);
 
     // set up a cluster with 3 nodes
@@ -3691,15 +3696,25 @@ public class TestHRegion {
       firstRegion.flushcache();
       HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
 
-      // given the default replication factor is 2 and we have 2 HFiles,
+      // Given the default replication factor is 2 and we have 2 HFiles,
       // we will have total of 4 replica of blocks on 3 datanodes; thus there
       // must be at least one host that have replica for 2 HFiles. That host's
       // weight will be equal to the unique block weight.
       long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
+      StringBuilder sb = new StringBuilder();
+      for (String host: blocksDistribution1.getTopHosts()) {
+        if (sb.length() > 0) sb.append(", ");
+        sb.append(host);
+        sb.append("=");
+        sb.append(blocksDistribution1.getWeight(host));
+      }
 
       String topHost = blocksDistribution1.getTopHosts().get(0);
       long topHostWeight = blocksDistribution1.getWeight(topHost);
-      assertTrue(uniqueBlocksWeight1 == topHostWeight);
+      String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
+        topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
+      LOG.info(msg);
+      assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);
 
       // use the static method to compute the value, it should be the same.
       // static method is used by load balancer or other components



Mime
View raw message