hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From li...@apache.org
Subject svn commit: r1470792 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/util/ test/java/org/apache/hadoop/hb...
Date Tue, 23 Apr 2013 02:49:56 GMT
Author: liyin
Date: Tue Apr 23 02:49:56 2013
New Revision: 1470792

URL: http://svn.apache.org/r1470792
Log:
[HBASE-5032][89-fb] Disable the write of Delete Column Bloom Filter

Author: adela

Summary:
The Delete Column Bloom filter write was enabled by default;
this change adds a parameter so we can enable/disable it in the future.

Test Plan: to be defined

Reviewers: liyintang, aaiyer, rshroff, manukranthk, shaneh

Reviewed By: liyintang

CC: hbase-eng@

Differential Revision: https://phabricator.fb.com/D746107

Task ID: 2186656

Added:
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestDelColBloomFilter.java
Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java Tue Apr 23
02:49:56 2013
@@ -652,9 +652,16 @@ public final class HConstants {
 
   public static final String DELETE_COLUMN_BLOOM_FILTER = "delete_column_bloom_filter";
 
-  public static final boolean ENABLE_DELETE_COLUMN_BLOOM_FILTER = false;
-
-  public static final String ENABLE_DELETE_COLUMN_BLOOM_FILTER_STRING = "enable_delete_column_bloom_filter";
+  /**
+   * This will enable/disable the usage of the delete column bloom filter.
+   * Note that this does not control whether the delete column bloom filter is
+   * written or read. In fact, we may still read and write it, but we will not
+   * use it when we scan data, and thus will not perform the optimized reads.
+   * To disable/enable the filter for both write and read, use
+   * BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED
+   */
+  public static final boolean USE_DELETE_COLUMN_BLOOM_FILTER = true;
+  public static final String USE_DELETE_COLUMN_BLOOM_FILTER_STRING = "use_delete_column_bloom_filter";
 
   // Delaying the region server load balancing by the following amount for a
   // load balancing where source is a favored region server.

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
Tue Apr 23 02:49:56 2013
@@ -1142,8 +1142,9 @@ public class HFileReaderV2 extends Abstr
     }
 
     for (HFileBlock b : loadOnOpenBlocks)
-      if (b.getBlockType() == blockType)
+      if (b.getBlockType() == blockType) {
         return b.getByteStream();
+      }
     return null;
   }
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
Tue Apr 23 02:49:56 2013
@@ -527,9 +527,10 @@ public class StoreFile extends SchemaCon
 
     // load delete family bloom filter
     reader.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META);
-
-    // load delete column bloom filter
-    reader.loadBloomfilter(BlockType.DELETE_COLUMN_BLOOM_META);
+    if (BloomFilterFactory.isDeleteColumnBloomEnabled(conf)) {
+      // load delete column bloom filter
+      reader.loadBloomfilter(BlockType.DELETE_COLUMN_BLOOM_META);
+    }
 
     try {
       byte [] timerangeBytes = metadataMap.get(TIMERANGE_KEY);
@@ -1148,6 +1149,10 @@ public class StoreFile extends SchemaCon
       return generalBloomFilterWriter;
     }
 
+    BloomFilterWriter getDeleteColumnBloomFilterWriter() {
+      return deleteColumnBloomFilterWriter;
+    }
+
     private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException {
       boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0);
       if (haveBloom) {
@@ -1742,6 +1747,7 @@ public class StoreFile extends SchemaCon
     void disableBloomFilterForTesting() {
       generalBloomFilter = null;
       this.deleteFamilyBloomFilter = null;
+      this.deleteColumnBloomFilter = null;
     }
 
     public long getMaxTimestamp() {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
Tue Apr 23 02:49:56 2013
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.regionser
  * KeyValueScanner adaptor over the Reader.  It also provides hooks into
  * bloom filter things.
  */
-class StoreFileScanner implements KeyValueScanner {
+public class StoreFileScanner implements KeyValueScanner {
   static final Log LOG = LogFactory.getLog(Store.class);
 
   // the reader it comes from:

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
Tue Apr 23 02:49:56 2013
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.KeyValueC
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HBaseServer.Call;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.regionserver.kvaggregator.KeyValueAggregator;
@@ -65,7 +66,10 @@ public class StoreScanner extends NonLaz
   private final KeyValueAggregator keyValueAggregator;
   private final NavigableSet<byte[]> columns;
   private final long oldestUnexpiredTS;
-  private boolean deleteColBloomEnabled = false;
+  /**
+   * Whether the deleteColBloomFilter is enabled for usage
+   */
+  private boolean isUsingDeleteColBloom = false;
 
   /** We don't ever expect to change this, the constant is just for clarity. */
   static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true;
@@ -106,9 +110,12 @@ public class StoreScanner extends NonLaz
     // StoreFile.passesBloomFilter(Scan, SortedSet<byte[]>).
     useRowColBloom = numCol > 1 || (!isGet && numCol == 1);
     if (store != null) {
-      this.deleteColBloomEnabled = store.conf.getBoolean(
-          HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER_STRING,
-          HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER);
+      // check first whether the delete column bloom filter is enabled for write/read
+      if (BloomFilterFactory.isDeleteColumnBloomEnabled(store.conf)) {
+        this.isUsingDeleteColBloom = store.conf.getBoolean(
+            HConstants.USE_DELETE_COLUMN_BLOOM_FILTER_STRING,
+            HConstants.USE_DELETE_COLUMN_BLOOM_FILTER);
+      }
     }
   }
 
@@ -458,7 +465,7 @@ public class StoreScanner extends NonLaz
                 reseek(matcher.getKeyForNextColumn(kv, null, false));
               } else {
                 reseek(matcher.getKeyForNextColumn(kv,
-                    this.heap.getActiveScanners(), deleteColBloomEnabled));
+                    this.heap.getActiveScanners(), isUsingDeleteColBloom));
               }
             } else {
               this.heap.next();
@@ -500,7 +507,7 @@ public class StoreScanner extends NonLaz
             if (this.store == null) {
               reseek(matcher.getKeyForNextColumn(kv, null, false));
             } else {
-              reseek(matcher.getKeyForNextColumn(kv, this.heap.getActiveScanners(),deleteColBloomEnabled));
+              reseek(matcher.getKeyForNextColumn(kv, this.heap.getActiveScanners(),isUsingDeleteColBloom));
             }
             break;
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
Tue Apr 23 02:49:56 2013
@@ -136,10 +136,11 @@ public final class BloomFilterFactory {
   }
 
   /**
-   * @return true if Delete Column Bloom filters are enabled in the given configuration
+   * @return true if Delete Column Bloom filters are enabled in the given
+   * configuration
    */
   public static boolean isDeleteColumnBloomEnabled(Configuration conf) {
-    return conf.getBoolean(IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, true);
+    return conf.getBoolean(IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false);
   }
 
   /**

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestDelColBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestDelColBloomFilter.java?rev=1470792&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestDelColBloomFilter.java
(added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestDelColBloomFilter.java
Tue Apr 23 02:49:56 2013
@@ -0,0 +1,155 @@
+package org.apache.hadoop.hbase.client;
+
+import java.io.DataInput;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.TestStoreFile;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestDelColBloomFilter {
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static byte[] FAMILY = Bytes.toBytes("testFamily");
+  private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static final byte[] TABLE = Bytes.toBytes("mytable");
+  private HBaseTestingUtility util = new HBaseTestingUtility();
+  private final String DIR = util.getTestDir() + "/TestHRegion/";
+  private static int SLAVES = 3;
+  public static AtomicInteger delBloomData = new AtomicInteger(0);
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   *
+   * @param numPuts - the number of puts we will insert
+   * @param numDeletes - number of deletes we will insert
+   * @param delColBloomFilterEnabled - will the delete col bloom filter be
+   * enabled
+   * @param toRead - whether we want to read the del bloom filter info
+   * @throws Exception
+   */
+  private void testDelColBloomFilter(int numPuts, int numDeletes,
+      boolean delColBloomFilterEnabled, boolean toRead) throws Exception {
+    util.startMiniCluster();
+    try {
+      HTableDescriptor htd = new HTableDescriptor(TABLE);
+      htd.addFamily(new HColumnDescriptor(FAMILY));
+
+      HRegionInfo info = new HRegionInfo(htd, null, null, false);
+      Path path = new Path(DIR);
+      HRegion region = HRegion.createHRegion(info, path,
+          util.getConfiguration());
+      for (int i = 0; i < numPuts; i++) {
+        Put put = new Put(Bytes.toBytes("row" + i));
+        put.add(FAMILY, QUALIFIER, Bytes.toBytes("value"));
+        region.put(put);
+      }
+      for (int i = 0; i < numDeletes; i++) {
+        Delete del = new Delete(Bytes.toBytes("row" + i));
+        del.deleteColumns(FAMILY, QUALIFIER);
+        region.delete(del, null, true);
+      }
+      region.flushcache();
+      int collectedDeletes = 0;
+      // if we don't want to read it but we wrote it
+      if (!toRead && delColBloomFilterEnabled) {
+        util.getConfiguration().setBoolean(
+            BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false);
+        delBloomData.set(0);
+      }
+      // we need to close the region in order to actually open it again (so
+      // that the new config will take effect, if there is a config change)
+      region.close();
+      HRegion regionNew = HRegion.openHRegion(info, path, null,
+          util.getConfiguration());
+      List<StoreFile> files = TestStoreFile.getStoreFiles(regionNew
+          .getStore(FAMILY));
+      for (StoreFile file : files) {
+        file.closeReader(false);
+        StoreFile.Reader reader = file.createReader();
+        Reader hfileReader = reader.getHFileReader();
+        if (!toRead && delColBloomFilterEnabled) {
+          Assert.assertEquals(0, delBloomData.intValue());
+        } else {
+          DataInput data = hfileReader.getDeleteColumnBloomFilterMetadata();
+          // if there are no deletes we should not get any metadata for the del
+          // col bloom filter
+          if (numDeletes > 0) {
+            // if the delete col bloom filter is disabled we should not get any
+            // metadata
+            if (!delColBloomFilterEnabled) {
+              Assert.assertEquals(null, data);
+            } else {
+              // if the number of deletes > 0 and the delete col bloom filter
+              // is enabled, the metadata should not be null
+              Assert.assertTrue(data != null);
+            }
+          } else {
+            Assert.assertEquals(null, data);
+          }
+          collectedDeletes += reader.getDeleteColumnCnt();
+        }
+        if (toRead) {
+          Assert.assertEquals(numDeletes, collectedDeletes);
+        }
+      }
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  /**
+   * This will run different tests of the delete bloom filter; we are testing
+   * whether we read/write the del col bloom filter when it is enabled/disabled
+   * by conf
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testWithDifferentConfiguraions() throws Exception {
+    util.getConfiguration().setBoolean(
+        BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, true);
+    // we enable the delete bloom filter on write and we want to read it
+    testDelColBloomFilter(1000, 100, true, true);
+    util.getConfiguration().setBoolean(
+        BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false);
+    // we disable the delete bloom filter on write and we do not want to read it
+    testDelColBloomFilter(1000, 100, false, false);
+    util.getConfiguration().setBoolean(
+        BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, true);
+    // we enable the delete bloom filter on write and we do not want to read it
+    testDelColBloomFilter(1000, 100, true, false);
+  }
+}

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
(original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
Tue Apr 23 02:49:56 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -250,6 +251,10 @@ public class TestCacheOnWrite {
 
     LOG.info("Block count by type: " + blockCountByType);
     String countByType = blockCountByType.toString();
+    int bloom_chunk = 9;
+    if (BloomFilterFactory.isDeleteColumnBloomEnabled(conf)) {
+      bloom_chunk = 10;
+    }
     BlockType cachedDataBlockType =
         encoderType.encodeInCache ? BlockType.ENCODED_DATA : BlockType.DATA;
 
@@ -257,11 +262,11 @@ public class TestCacheOnWrite {
     // so number of blocks depends on this parameter as well.
     if (encoder.getEncodingOnDisk() == DataBlockEncoding.PREFIX) {
       assertEquals("{" + cachedDataBlockType
-          + "=965, LEAF_INDEX=121, BLOOM_CHUNK=10, INTERMEDIATE_INDEX=17}",
+          + "=965, LEAF_INDEX=121, BLOOM_CHUNK="+bloom_chunk+", INTERMEDIATE_INDEX=17}",
           countByType);
     } else {
       assertEquals("{" + cachedDataBlockType
-          + "=1379, LEAF_INDEX=173, BLOOM_CHUNK=10, INTERMEDIATE_INDEX=24}",
+          + "=1379, LEAF_INDEX=173, BLOOM_CHUNK="+bloom_chunk+", INTERMEDIATE_INDEX=24}",
           countByType);
     }
 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
(original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
Tue Apr 23 02:49:56 2013
@@ -274,4 +274,4 @@ public class TestLoadIncrementalHFiles {
       writer.close();
     }
   }
-}
\ No newline at end of file
+}

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java?rev=1470792&r1=1470791&r2=1470792&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
(original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
Tue Apr 23 02:49:56 2013
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.junit.Test;
@@ -425,7 +426,7 @@ public class TestBlocksRead extends HBas
     HBaseConfiguration conf = getConf();
     conf.setBoolean("io.storefile.delete.column.bloom.enabled", true);
     initHRegion(TABLE, getName(), conf, FAMILY, true);
-    if (!conf.getBoolean(HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER_STRING, HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER))
{
+    if (!conf.getBoolean(BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false))
{
       System.out.println("ignoring this test since the delete bloom filter is not enabled...");
       return;
     }
@@ -488,7 +489,7 @@ public class TestBlocksRead extends HBas
     HBaseConfiguration conf = getConf();
     conf.setBoolean("io.storefile.delete.column.bloom.enabled", true);
     initHRegion(TABLE, getName(), conf, FAMILY, true);
-    if (!conf.getBoolean(HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER_STRING, HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER))
{
+    if (!conf.getBoolean(BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false))
{
       System.out.println("ignoring this test since the delete bloom filter is not enabled...");
       return;
     }
@@ -532,7 +533,7 @@ public class TestBlocksRead extends HBas
     HBaseConfiguration conf = getConf();
     conf.setBoolean("io.storefile.delete.column.bloom.enabled", true);
     initHRegion(TABLE, getName(), conf, FAMILY, true);
-    if (!conf.getBoolean(HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER_STRING, HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER))
{
+    if (!conf.getBoolean(BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false))
{
       System.out.println("ignoring this test since the delete bloom filter is not enabled...");
       return;
     }
@@ -582,7 +583,7 @@ public class TestBlocksRead extends HBas
     HBaseConfiguration conf = getConf();
     conf.setBoolean("io.storefile.delete.column.bloom.enabled", true);
     initHRegion(TABLE, getName(), conf, FAMILY, true);
-    if (!conf.getBoolean(HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER_STRING, HConstants.ENABLE_DELETE_COLUMN_BLOOM_FILTER))
{
+    if (!conf.getBoolean(BloomFilterFactory.IO_STOREFILE_DELETECOLUMN_BLOOM_ENABLED, false))
{
       System.out.println("ignoring this test since the delete bloom filter is not enabled...");
       return;
     }



Mime
View raw message