hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject [2/2] hbase git commit: Revert "HBASE-12270 A bug in the bucket cache, with cache blocks on write enabled (Liu Shaohui)" Reverting since this is failing often on 0.98 branch
Date Fri, 02 Jan 2015 17:27:40 GMT
Revert "HBASE-12270 A bug in the bucket cache, with cache blocks on write enabled (Liu Shaohui)"
Reverting since this is failing often on 0.98 branch

This reverts commit 809de6ae1ca68caf5c0c614e3e1776ad082514f0.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9b25e850
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9b25e850
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9b25e850

Branch: refs/heads/0.98
Commit: 9b25e850df5b915305194e21ae4a47d0ab029034
Parents: bb7074b
Author: stack <stack@apache.org>
Authored: Fri Jan 2 09:24:41 2015 -0800
Committer: stack <stack@apache.org>
Committed: Fri Jan 2 09:24:41 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/HFileBlock.java       | 11 +---
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 58 ++++++--------------
 2 files changed, 20 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9b25e850/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 5e151fc..9108465 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -216,11 +216,11 @@ public class HFileBlock implements Cacheable {
     this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
     this.prevBlockOffset = prevBlockOffset;
     this.buf = buf;
+    if (fillHeader)
+      overwriteHeader();
     this.offset = offset;
     this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
     this.fileContext = fileContext;
-    if (fillHeader)
-      overwriteHeader();
     this.buf.rewind();
   }
 
@@ -322,11 +322,6 @@ public class HFileBlock implements Cacheable {
     buf.putInt(onDiskSizeWithoutHeader);
     buf.putInt(uncompressedSizeWithoutHeader);
     buf.putLong(prevBlockOffset);
-    if (this.fileContext.isUseHBaseChecksum()) {
-      buf.put(fileContext.getChecksumType().getCode());
-      buf.putInt(fileContext.getBytesPerChecksum());
-      buf.putInt(onDiskDataSizeWithHeader);
-    }
   }
 
   /**
@@ -1129,7 +1124,7 @@ public class HFileBlock implements Cacheable {
           cacheConf.shouldCacheCompressed(blockType.getCategory()) ?
             getOnDiskBufferWithHeader() :
             getUncompressedBufferWithHeader(),
-          FILL_HEADER, startOffset,
+          DONT_FILL_HEADER, startOffset,
           onDiskBytesWithHeader.length + onDiskChecksum.length, newContext);
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9b25e850/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index e85bee6..e3b09c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -37,7 +37,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -160,48 +158,27 @@ public class TestCacheOnWrite {
   }
 
   public TestCacheOnWrite(CacheOnWriteType cowType, Compression.Algorithm compress,
-      BlockEncoderTestType encoderType, boolean cacheCompressedData, BlockCache blockCache) {
+      BlockEncoderTestType encoderType, boolean cacheCompressedData) {
     this.cowType = cowType;
     this.compress = compress;
     this.encoderType = encoderType;
     this.encoder = encoderType.getEncoder();
     this.cacheCompressedData = cacheCompressedData;
-    this.blockCache = blockCache;
     testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress +
        ", encoderType=" + encoderType + ", cacheCompressedData=" + cacheCompressedData + "]";
     System.out.println(testDescription);
   }
 
-  private static List<BlockCache> getBlockCaches() throws IOException {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    List<BlockCache> blockcaches = new ArrayList<BlockCache>();
-    // default
-    blockcaches.add(new CacheConfig(conf).getBlockCache());
-
-    // memory
-    BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
-    blockcaches.add(lru);
-
-    // bucket cache
-    FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
-    int[] bucketSizes = {INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024 };
-    BlockCache bucketcache =
-        new BucketCache("file:" + TEST_UTIL.getDataTestDir() + "/bucket.data",
-            128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
-    blockcaches.add(bucketcache);
-    return blockcaches;
-  }
-
   @Parameters
-  public static Collection<Object[]> getParameters() throws IOException {
+  public static Collection<Object[]> getParameters() {
     List<Object[]> cowTypes = new ArrayList<Object[]>();
-    for (BlockCache blockache : getBlockCaches()) {
-      for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
-        for (Compression.Algorithm compress : HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
-          for (BlockEncoderTestType encoderType : BlockEncoderTestType.values()) {
-            for (boolean cacheCompressedData : new boolean[] { false, true }) {
-              cowTypes.add(new Object[] { cowType, compress, encoderType, cacheCompressedData, blockache});
-            }
+    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
+      for (Compression.Algorithm compress :
+           HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
+        for (BlockEncoderTestType encoderType :
+             BlockEncoderTestType.values()) {
+          for (boolean cacheCompressedData : new boolean[] { false, true }) {
+            cowTypes.add(new Object[] { cowType, compress, encoderType, cacheCompressedData });
           }
         }
       }
@@ -217,13 +194,17 @@ public class TestCacheOnWrite {
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
     conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
         BLOOM_BLOCK_SIZE);
+    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
+      cowType.shouldBeCached(BlockType.DATA));
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
+        cowType.shouldBeCached(BlockType.LEAF_INDEX));
+    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
+        cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
     conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
     cowType.modifyConf(conf);
     fs = HFileSystem.get(conf);
-    cacheConf =
-        new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
-        cowType.shouldBeCached(BlockType.LEAF_INDEX),
-        cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, true, false);
+    cacheConf = new CacheConfig(conf);
+    blockCache = cacheConf.getBlockCache();
   }
 
   @After
@@ -327,11 +308,6 @@ public class TestCacheOnWrite {
       assertEquals("{" + cachedDataBlockType
           + "=1379, LEAF_INDEX=154, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=18}", countByType);
     }
-
-    // iterate all the keyvalue from hfile
-    while (scanner.next()) {
-      Cell cell = scanner.getKeyValue();
-    }
     reader.close();
   }
 


Mime
View raw message