hbase-commits mailing list archives

From: ramkris...@apache.org
Subject: svn commit: r1525269 [6/8] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apa...
Date: Sat, 21 Sep 2013 18:01:35 GMT
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java Sat Sep 21 18:01:32 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.encod
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
 
@@ -27,6 +28,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.ConcurrentSkipListSet;
@@ -35,24 +37,30 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  * Tests scanning/seeking data with PrefixTree Encoding.
  */
+@RunWith(Parameterized.class)
 @Category(SmallTests.class)
 public class TestPrefixTreeEncoding {
-  private static final Log LOG = LogFactory
-      .getLog(TestPrefixTreeEncoding.class);
-  static final String CF = "EncodingTestCF";
-  static final byte[] CF_BYTES = Bytes.toBytes(CF);
+  private static final Log LOG = LogFactory.getLog(TestPrefixTreeEncoding.class);
+  private static final String CF = "EncodingTestCF";
+  private static final byte[] CF_BYTES = Bytes.toBytes(CF);
   private static final int NUM_ROWS_PER_BATCH = 50;
   private static final int NUM_COLS_PER_ROW = 20;
 
@@ -61,7 +69,21 @@ public class TestPrefixTreeEncoding {
       KeyValue.COMPARATOR);
 
   private static boolean formatRowNum = false;
-
+  
+  @Parameters
+  public static Collection<Object[]> parameters() {
+    List<Object[]> paramList = new ArrayList<Object[]>();
+    {
+      paramList.add(new Object[] { false });
+      paramList.add(new Object[] { true });
+    }
+    return paramList;
+  }
+  private final boolean includesTag;
+  public TestPrefixTreeEncoding(boolean includesTag) {
+    this.includesTag = includesTag;
+  }
+ 
   @Before
   public void setUp() throws Exception {
     kvset.clear();
@@ -73,63 +95,74 @@ public class TestPrefixTreeEncoding {
     formatRowNum = true;
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
-    ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, false);
+    ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, false, includesTag);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(false);
+    meta.setIncludesMvcc(false);
+    meta.setIncludesTags(includesTag);
+    meta.setCompressAlgo(Algorithm.NONE);
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
-        Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
-    encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
-    EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, false);
+        DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
+    encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
+    EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
+        encoder.newDataBlockDecodingContext(meta));
     byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
-    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
-        DataBlockEncoding.ID_SIZE, onDiskBytes.length
-            - DataBlockEncoding.ID_SIZE);
+    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
+        onDiskBytes.length - DataBlockEncoding.ID_SIZE);
     seeker.setCurrentBuffer(readBuffer);
 
     // Seek before the first keyvalue;
-    KeyValue seekKey = KeyValue.createFirstDeleteFamilyOnRow(
-        getRowKey(batchId, 0), CF_BYTES);
-    seeker.seekToKeyInBlock(seekKey.getBuffer(), seekKey.getKeyOffset(),
-        seekKey.getKeyLength(), true);
+    KeyValue seekKey = KeyValue.createFirstDeleteFamilyOnRow(getRowKey(batchId, 0), CF_BYTES);
+    seeker.seekToKeyInBlock(seekKey.getBuffer(), seekKey.getKeyOffset(), seekKey.getKeyLength(),
+        true);
     assertEquals(null, seeker.getKeyValue());
 
     // Seek before the middle keyvalue;
-    seekKey = KeyValue.createFirstDeleteFamilyOnRow(
-        getRowKey(batchId, NUM_ROWS_PER_BATCH / 3), CF_BYTES);
-    seeker.seekToKeyInBlock(seekKey.getBuffer(), seekKey.getKeyOffset(),
-        seekKey.getKeyLength(), true);
+    seekKey = KeyValue.createFirstDeleteFamilyOnRow(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3),
+        CF_BYTES);
+    seeker.seekToKeyInBlock(seekKey.getBuffer(), seekKey.getKeyOffset(), seekKey.getKeyLength(),
+        true);
     assertNotNull(seeker.getKeyValue());
-    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3 - 1), seeker
-        .getKeyValue().getRow());
+    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3 - 1), seeker.getKeyValue().getRow());
 
     // Seek before the last keyvalue;
-    seekKey = KeyValue.createFirstDeleteFamilyOnRow(Bytes.toBytes("zzzz"),
-        CF_BYTES);
-    seeker.seekToKeyInBlock(seekKey.getBuffer(), seekKey.getKeyOffset(),
-        seekKey.getKeyLength(), true);
+    seekKey = KeyValue.createFirstDeleteFamilyOnRow(Bytes.toBytes("zzzz"), CF_BYTES);
+    seeker.seekToKeyInBlock(seekKey.getBuffer(), seekKey.getKeyOffset(), seekKey.getKeyLength(),
+        true);
     assertNotNull(seeker.getKeyValue());
-    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH - 1), seeker
-        .getKeyValue().getRow());
+    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH - 1), seeker.getKeyValue().getRow());
   }
 
   @Test
   public void testScanWithRandomData() throws Exception {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
-    ByteBuffer dataBuffer = generateRandomTestData(kvset, numBatchesWritten++);
+    ByteBuffer dataBuffer = generateRandomTestData(kvset, numBatchesWritten++, includesTag);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(false);
+    meta.setIncludesMvcc(false);
+    meta.setIncludesTags(includesTag);
+    meta.setCompressAlgo(Algorithm.NONE);
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
-        Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
-    encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
-    EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, false);
-    byte[] onDiskBytes=blkEncodingCtx.getOnDiskBytesWithHeader();
-    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
-        DataBlockEncoding.ID_SIZE, onDiskBytes.length
-            - DataBlockEncoding.ID_SIZE);
+        DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
+    encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
+    EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
+        encoder.newDataBlockDecodingContext(meta));
+    byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
+    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
+        onDiskBytes.length - DataBlockEncoding.ID_SIZE);
     seeker.setCurrentBuffer(readBuffer);
     KeyValue previousKV = null;
-    do{
+    do {
       KeyValue currentKV = seeker.getKeyValue();
+      System.out.println(currentKV);
       if (previousKV != null && KeyValue.COMPARATOR.compare(currentKV, previousKV) < 0) {
         dumpInputKVSet();
-        fail("Current kv " + currentKV + " is smaller than previous keyvalue "
-            + previousKV);
+        fail("Current kv " + currentKV + " is smaller than previous keyvalue " + previousKV);
+      }
+      if (!includesTag) {
+        assertFalse(currentKV.getTagsLength() > 0);
+      } else {
+        Assert.assertTrue(currentKV.getTagsLength() > 0);
       }
       previousKV = currentKV;
     } while (seeker.next());
@@ -139,15 +172,20 @@ public class TestPrefixTreeEncoding {
   public void testSeekWithRandomData() throws Exception {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
-    ByteBuffer dataBuffer = generateRandomTestData(kvset, batchId);
+    ByteBuffer dataBuffer = generateRandomTestData(kvset, batchId, includesTag);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(false);
+    meta.setIncludesMvcc(false);
+    meta.setIncludesTags(includesTag);
+    meta.setCompressAlgo(Algorithm.NONE);
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
-        Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
-    encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
-    EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, false);
+        DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
+    encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
+    EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
+        encoder.newDataBlockDecodingContext(meta));
     byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
-    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
-        DataBlockEncoding.ID_SIZE, onDiskBytes.length
-            - DataBlockEncoding.ID_SIZE);
+    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
+        onDiskBytes.length - DataBlockEncoding.ID_SIZE);
     verifySeeking(seeker, readBuffer, batchId);
   }
 
@@ -155,19 +193,23 @@ public class TestPrefixTreeEncoding {
   public void testSeekWithFixedData() throws Exception {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
-    ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId);
+    ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, includesTag);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(false);
+    meta.setIncludesMvcc(false);
+    meta.setIncludesTags(includesTag);
+    meta.setCompressAlgo(Algorithm.NONE);
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
-        Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
-    encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
+        DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
+    encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
     EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
-        false);
+        encoder.newDataBlockDecodingContext(meta));
     byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
-    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
-        DataBlockEncoding.ID_SIZE, onDiskBytes.length
-            - DataBlockEncoding.ID_SIZE);
+    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
+        onDiskBytes.length - DataBlockEncoding.ID_SIZE);
     verifySeeking(seeker, readBuffer, batchId);
   }
-
+  
   private void verifySeeking(EncodedSeeker encodeSeeker,
       ByteBuffer encodedData, int batchId) {
     List<KeyValue> kvList = new ArrayList<KeyValue>();
@@ -202,73 +244,93 @@ public class TestPrefixTreeEncoding {
       System.out.println(kv);
     }
   }
-  
-  private static ByteBuffer generateFixedTestData(
-      ConcurrentSkipListSet<KeyValue> kvset, int batchId) throws Exception {
-    return generateFixedTestData(kvset, batchId, true);
+
+  private static ByteBuffer generateFixedTestData(ConcurrentSkipListSet<KeyValue> kvset,
+      int batchId, boolean useTags) throws Exception {
+    return generateFixedTestData(kvset, batchId, true, useTags);
   }
 
-  private static ByteBuffer generateFixedTestData(
-      ConcurrentSkipListSet<KeyValue> kvset, int batchId, boolean partial)
-      throws Exception {
+  private static ByteBuffer generateFixedTestData(ConcurrentSkipListSet<KeyValue> kvset,
+      int batchId, boolean partial, boolean useTags) throws Exception {
     ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
     DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
     for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
-      if (partial && i / 10 % 2 == 1) continue;
+      if (partial && i / 10 % 2 == 1)
+        continue;
       for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
-        KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES,
-            getQualifier(j), getValue(batchId, i, j));
-        kvset.add(kv);
+        if (!useTags) {
+          KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES, getQualifier(j), getValue(
+              batchId, i, j));
+          kvset.add(kv);
+        } else {
+          KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES, getQualifier(j), 0l,
+              getValue(batchId, i, j), new Tag[] { new Tag((byte) 1, "metaValue1") });
+          kvset.add(kv);
+        }
       }
     }
     for (KeyValue kv : kvset) {
       userDataStream.writeInt(kv.getKeyLength());
       userDataStream.writeInt(kv.getValueLength());
-      userDataStream
-          .write(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
-      userDataStream.write(kv.getBuffer(), kv.getValueOffset(),
-          kv.getValueLength());
+      userDataStream.write(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+      userDataStream.write(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+      if (useTags) {
+        userDataStream.writeShort(kv.getTagsLength());
+        userDataStream.write(kv.getBuffer(), kv.getValueOffset() + kv.getValueLength()
+            + Bytes.SIZEOF_SHORT, kv.getTagsLength());
+      }
     }
     return ByteBuffer.wrap(baosInMemory.toByteArray());
   }
 
-  private static ByteBuffer generateRandomTestData(
-      ConcurrentSkipListSet<KeyValue> kvset, int batchId) throws Exception {
+  private static ByteBuffer generateRandomTestData(ConcurrentSkipListSet<KeyValue> kvset,
+      int batchId, boolean useTags) throws Exception {
     ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
     DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
     Random random = new Random();
     for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
-      if (random.nextInt(100) < 50) continue;
+      if (random.nextInt(100) < 50)
+        continue;
       for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
-        if (random.nextInt(100) < 50) continue;
-        KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES,
-            getQualifier(j), getValue(batchId, i, j));
-        kvset.add(kv);
+        if (random.nextInt(100) < 50)
+          continue;
+        if (!useTags) {
+          KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES, getQualifier(j), getValue(
+              batchId, i, j));
+          kvset.add(kv);
+        } else {
+          KeyValue kv = new KeyValue(getRowKey(batchId, i), CF_BYTES, getQualifier(j), 0l,
+              getValue(batchId, i, j), new Tag[] { new Tag((byte) 1, "metaValue1") });
+          kvset.add(kv);
+        }
       }
     }
+
     for (KeyValue kv : kvset) {
       userDataStream.writeInt(kv.getKeyLength());
       userDataStream.writeInt(kv.getValueLength());
-      userDataStream
-          .write(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
-      userDataStream.write(kv.getBuffer(), kv.getValueOffset(),
-          kv.getValueLength());
+      userDataStream.write(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+      userDataStream.write(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+      if (useTags) {
+        userDataStream.writeShort(kv.getTagsLength());
+        userDataStream.write(kv.getBuffer(), kv.getValueOffset() + kv.getValueLength()
+            + Bytes.SIZEOF_SHORT, kv.getTagsLength());
+      }
     }
     return ByteBuffer.wrap(baosInMemory.toByteArray());
   }
 
   private static byte[] getRowKey(int batchId, int i) {
-    return Bytes.toBytes("batch" + batchId + "_row"
-        + (formatRowNum ? String.format("%04d", i) : i));
+    return Bytes
+        .toBytes("batch" + batchId + "_row" + (formatRowNum ? String.format("%04d", i) : i));
   }
 
   private static byte[] getQualifier(int j) {
-    return Bytes.toBytes("col" + j);
+    return Bytes.toBytes("colfdfafhfhsdfhsdfh" + j);
   }
 
   private static byte[] getValue(int batchId, int i, int j) {
-    return Bytes.toBytes("value_for_" + Bytes.toString(getRowKey(batchId, i))
-        + "_col" + j);
+    return Bytes.toBytes("value_for_" + Bytes.toString(getRowKey(batchId, i)) + "_col" + j);
   }
 
 }
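
A note on the harness change in this file: TestPrefixTreeEncoding now runs under the JUnit 4 Parameterized runner, so every @Test method executes twice, once with includesTag = false and once with true. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative only, and it assumes stock JUnit 4 on the classpath rather than anything added by this patch.

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ParameterizedSketch {

  // One Object[] per run; the runner constructs the class once per row.
  @Parameters
  public static Collection<Object[]> parameters() {
    return Arrays.asList(new Object[][] { { false }, { true } });
  }

  private final boolean includesTag;

  public ParameterizedSketch(boolean includesTag) {
    this.includesTag = includesTag;
  }

  @Test
  public void runsOncePerParameter() {
    // exercise the tag and no-tag code paths here
    System.out.println("includesTag=" + includesTag);
  }
}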

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java Sat Sep 21 18:01:32 2013
@@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.util.ChecksumType;
 
@@ -339,13 +341,18 @@ public class CacheTestUtils {
       cachedBuffer.putInt(uncompressedSizeWithoutHeader);
       cachedBuffer.putLong(prevBlockOffset);
       cachedBuffer.rewind();
-
+      HFileContext meta = new HFileContext();
+      meta.setUsesHBaseChecksum(false);
+      meta.setIncludesMvcc(includesMemstoreTS);
+      meta.setIncludesTags(false);
+      meta.setCompressAlgo(Compression.Algorithm.NONE);
+      meta.setBytesPerChecksum(0);
+      meta.setChecksumType(ChecksumType.NULL);
       HFileBlock generated = new HFileBlock(BlockType.DATA,
           onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
           prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
-          blockSize, includesMemstoreTS, HFileBlock.MINOR_VERSION_NO_CHECKSUM,
-          0, ChecksumType.NULL.getCode(),
-          onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE);
+          blockSize,
+          onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, meta);
 
       String strKey;
       /* No conflicting keys */
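
The hunk above is one instance of the change that recurs throughout this commit: loose per-field arguments (compression algorithm, MVCC flag, checksum type, bytes-per-checksum) are folded into a single HFileContext object that is handed to block writers and readers. Below is a consolidated sketch of that pattern, restricted to the setters and constructors visible in this patch; the class name is illustrative, and it assumes it lives in the org.apache.hadoop.hbase.io.hfile test package (like the tests in this commit) so the block-level classes resolve.

package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.io.compress.Compression;

public class HFileContextSketch {

  // Gather block-level metadata once instead of passing loose flags around.
  static HFileContext newContext(boolean includesMemstoreTS, boolean includesTag) {
    HFileContext meta = new HFileContext();
    meta.setUsesHBaseChecksum(true);                   // blocks carry HBase checksums
    meta.setIncludesMvcc(includesMemstoreTS);          // memstore timestamps encoded?
    meta.setIncludesTags(includesTag);                 // per-cell tags encoded?
    meta.setCompressAlgo(Compression.Algorithm.NONE);
    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
    return meta;
  }

  // Writers and readers now take the context instead of individual flags.
  static HFileBlock.Writer newWriter(HFileContext meta) throws IOException {
    return new HFileBlock.Writer(null /* no data block encoder */, meta);
  }

  // "is" and "totalSize" stand for the open stream and byte count produced
  // by a prior write pass, as in the tests in this commit.
  static HFileBlock.FSReader openReader(FSDataInputStream is, long totalSize,
      HFileContext meta) throws IOException {
    return new HFileBlock.FSReaderV2(is, totalSize, meta);
  }
}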

Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java?rev=1525269&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java Sat Sep 21 18:01:32 2013
@@ -0,0 +1,31 @@
+ /*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+/**
+ * Used in testcases only.  
+ */
+public enum TagUsage {
+  // No tags would be added
+  NO_TAG, 
+  // KVs with tags 
+  ONLY_TAG, 
+  // kvs with and without tags
+  PARTIAL_TAG;
+
+}
\ No newline at end of file

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java Sat Sep 21 18:01:32 2013
@@ -40,13 +40,14 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
@@ -183,6 +184,7 @@ public class TestCacheOnWrite {
   @Before
   public void setUp() throws IOException {
     conf = TEST_UTIL.getConfiguration();
+    this.conf.set("dfs.datanode.data.dir.perm", "700");
     conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
     conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
@@ -207,13 +209,24 @@ public class TestCacheOnWrite {
 
   @Test
   public void testStoreFileCacheOnWrite() throws IOException {
-    writeStoreFile();
-    readStoreFile();
+    testStoreFileCacheOnWriteInternals(false);
+    testStoreFileCacheOnWriteInternals(true);
   }
 
-  private void readStoreFile() throws IOException {
-    HFileReaderV2 reader = (HFileReaderV2) HFile.createReaderWithEncoding(fs,
-        storeFilePath, cacheConf, encoder.getEncodingInCache());
+  protected void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException {
+    writeStoreFile(useTags);
+    readStoreFile(useTags);
+  }
+
+  private void readStoreFile(boolean useTags) throws IOException {
+    AbstractHFileReader reader;
+    if (useTags) {
+      reader = (HFileReaderV3) HFile.createReaderWithEncoding(fs, storeFilePath, cacheConf,
+          encoder.getEncodingInCache());
+    } else {
+      reader = (HFileReaderV2) HFile.createReaderWithEncoding(fs, storeFilePath, cacheConf,
+          encoder.getEncodingInCache());
+    }
     LOG.info("HFile information: " + reader);
     final boolean cacheBlocks = false;
     final boolean pread = false;
@@ -260,10 +273,13 @@ public class TestCacheOnWrite {
     String countByType = blockCountByType.toString();
     BlockType cachedDataBlockType =
         encoderType.encodeInCache ? BlockType.ENCODED_DATA : BlockType.DATA;
-    assertEquals("{" + cachedDataBlockType
-        + "=1379, LEAF_INDEX=154, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=18}",
-        countByType);
-
+    if (useTags) {
+      assertEquals("{" + cachedDataBlockType
+          + "=1550, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=20}", countByType);
+    } else {
+      assertEquals("{" + cachedDataBlockType
+          + "=1379, LEAF_INDEX=154, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=18}", countByType);
+    }
     reader.close();
   }
 
@@ -283,33 +299,54 @@ public class TestCacheOnWrite {
     }
   }
 
-  public void writeStoreFile() throws IOException {
+  public void writeStoreFile(boolean useTags) throws IOException {
+    if(useTags) {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    } else {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
+    }
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
-    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        DATA_BLOCK_SIZE)
-            .withOutputDir(storeFileParentDir)
-            .withCompression(compress)
-            .withDataBlockEncoder(encoder)
-            .withComparator(KeyValue.COMPARATOR)
-            .withBloomType(BLOOM_TYPE)
-            .withMaxKeyCount(NUM_KV)
-            .withChecksumType(CKTYPE)
-            .withBytesPerChecksum(CKBYTES)
-            .build();
+    HFileContext meta = new HFileContext();
+    meta.setCompressAlgo(compress);
+    meta.setChecksumType(CKTYPE);
+    meta.setBytesPerChecksum(CKBYTES);
+    meta.setBlocksize(DATA_BLOCK_SIZE);
+    meta.setEncodingInCache(encoder.getEncodingInCache());
+    meta.setEncodingOnDisk(encoder.getEncodingOnDisk());
+    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+        .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
+        .withFileContext(meta)
+        .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
 
     final int rowLen = 32;
     for (int i = 0; i < NUM_KV; ++i) {
       byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
       byte[] v = TestHFileWriterV2.randomValue(rand);
       int cfLen = rand.nextInt(k.length - rowLen + 1);
-      KeyValue kv = new KeyValue(
+      KeyValue kv;
+      if(useTags) {
+        Tag t = new Tag((byte) 1, "visibility");
+        List<Tag> tagList = new ArrayList<Tag>();
+        tagList.add(t);
+        Tag[] tags = new Tag[1];
+        tags[0] = t;
+        kv = new KeyValue(
+            k, 0, rowLen,
+            k, rowLen, cfLen,
+            k, rowLen + cfLen, k.length - rowLen - cfLen,
+            rand.nextLong(),
+            generateKeyType(rand),
+            v, 0, v.length, tagList);
+      } else {
+        kv = new KeyValue(
           k, 0, rowLen,
           k, rowLen, cfLen,
           k, rowLen + cfLen, k.length - rowLen - cfLen,
           rand.nextLong(),
           generateKeyType(rand),
           v, 0, v.length);
+      }
       sfw.append(kv);
     }
 
@@ -319,6 +356,16 @@ public class TestCacheOnWrite {
 
   @Test
   public void testNotCachingDataBlocksDuringCompaction() throws IOException {
+    testNotCachingDataBlocksDuringCompactionInternals(false);
+    testNotCachingDataBlocksDuringCompactionInternals(true);
+  }
+
+  protected void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException {
+    if (useTags) {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    } else {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
+    }
     // TODO: need to change this test if we add a cache size threshold for
     // compactions, or if we implement some other kind of intelligent logic for
     // deciding what blocks to cache-on-write on compaction.
@@ -347,8 +394,14 @@ public class TestCacheOnWrite {
           String qualStr = "col" + iCol;
           String valueStr = "value_" + rowStr + "_" + qualStr;
           for (int iTS = 0; iTS < 5; ++iTS) {
-            p.add(cfBytes, Bytes.toBytes(qualStr), ts++,
-                Bytes.toBytes(valueStr));
+            if (useTags) {
+              Tag t = new Tag((byte) 1, "visibility");
+              Tag[] tags = new Tag[1];
+              tags[0] = t;
+              p.add(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr), tags);
+            } else {
+              p.add(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
+            }
           }
         }
         region.put(p);
@@ -369,6 +422,5 @@ public class TestCacheOnWrite {
     region.close();
     blockCache.shutdown();
   }
-
 }
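
The TestCacheOnWrite hunks above, like several earlier ones, construct cells that carry a tag. The construction is the same everywhere; here is a small sketch using only the Tag and KeyValue constructors that appear in this patch. The class name and the row/family/qualifier/value byte arrays are placeholders, not names from the patch.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;

public class TaggedKeyValueSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("f");
    byte[] qualifier = Bytes.toBytes("q");
    byte[] value = Bytes.toBytes("v");

    // A Tag is (type byte, value); the tests above use type 1 throughout.
    Tag t = new Tag((byte) 1, "visibility");

    // Tag[] form, as used in the prefix-tree and block tests:
    KeyValue kv = new KeyValue(row, family, qualifier, 0L, value, new Tag[] { t });
    System.out.println(kv + " tagsLength=" + kv.getTagsLength());

    // The store-file hunk above instead passes a List<Tag> to the
    // offset-based KeyValue constructor; either form attaches the same tags.
    List<Tag> tagList = new ArrayList<Tag>();
    tagList.add(t);
  }
}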
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java Sat Sep 21 18:01:32 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.SmallTest
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.util.ChecksumType;
 
@@ -79,6 +80,11 @@ public class TestChecksum {
    */
   @Test
   public void testChecksumCorruption() throws IOException {
+    testChecksumCorruptionInternals(false);
+    testChecksumCorruptionInternals(true);
+  }
+
+  protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
         LOG.info("testChecksumCorruption: Compression algorithm: " + algo +
@@ -86,9 +92,13 @@ public class TestChecksum {
         Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
             + algo);
         FSDataOutputStream os = fs.create(path);
-        HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
-            true, HFile.DEFAULT_CHECKSUM_TYPE,
-            HFile.DEFAULT_BYTES_PER_CHECKSUM);
+        HFileContext meta = new HFileContext();
+        meta.setCompressAlgo(algo);
+        meta.setIncludesMvcc(true);
+        meta.setIncludesTags(useTags);
+        meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+        meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+        HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
         long totalSize = 0;
         for (int blockId = 0; blockId < 2; ++blockId) {
           DataOutputStream dos = hbw.startWriting(BlockType.DATA);
@@ -104,8 +114,12 @@ public class TestChecksum {
 
         // Do a read that purposely introduces checksum verification failures.
         FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
-        HFileBlock.FSReader hbr = new FSReaderV2Test(is, algo,
-            totalSize, HFile.MAX_FORMAT_VERSION, fs, path);
+        meta = new HFileContext();
+        meta.setCompressAlgo(algo);
+        meta.setIncludesMvcc(true);
+        meta.setIncludesTags(useTags);
+        meta.setUsesHBaseChecksum(true);
+        HFileBlock.FSReader hbr = new FSReaderV2Test(is, totalSize, fs, path, meta);
         HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
         b.sanityCheck();
         assertEquals(4936, b.getUncompressedSizeWithoutHeader());
@@ -147,8 +161,7 @@ public class TestChecksum {
         HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
         assertEquals(false, newfs.useHBaseChecksum());
         is = new FSDataInputStreamWrapper(newfs, path);
-        hbr = new FSReaderV2Test(is, algo,
-            totalSize, HFile.MAX_FORMAT_VERSION, newfs, path);
+        hbr = new FSReaderV2Test(is, totalSize, newfs, path, meta);
         b = hbr.readBlockData(0, -1, -1, pread);
         is.close();
         b.sanityCheck();
@@ -173,14 +186,26 @@ public class TestChecksum {
    */
   @Test
   public void testChecksumChunks() throws IOException {
+    testChecksumInternals(false);
+    testChecksumInternals(true);
+  }
+
+  protected void testChecksumInternals(boolean useTags) throws IOException {
     Compression.Algorithm algo = NONE;
     for (boolean pread : new boolean[] { false, true }) {
       for (int bytesPerChecksum : BYTES_PER_CHECKSUM) {
         Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" + 
                              algo + bytesPerChecksum);
         FSDataOutputStream os = fs.create(path);
-        HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
-          true, HFile.DEFAULT_CHECKSUM_TYPE, bytesPerChecksum);
+        HFileContext meta = new HFileContext();
+        meta.setCompressAlgo(algo);
+        meta.setIncludesMvcc(true);
+        meta.setIncludesTags(useTags);
+        meta.setUsesHBaseChecksum(true);
+        meta.setBytesPerChecksum(bytesPerChecksum);
+        meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+        HFileBlock.Writer hbw = new HFileBlock.Writer(null,
+           meta);
 
         // write one block. The block has data
         // that is at least 6 times more than the checksum chunk size
@@ -211,8 +236,14 @@ public class TestChecksum {
         // Read data back from file.
         FSDataInputStream is = fs.open(path);
         FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
+        meta = new HFileContext();
+        meta.setCompressAlgo(algo);
+        meta.setIncludesMvcc(true);
+        meta.setIncludesTags(useTags);
+        meta.setUsesHBaseChecksum(true);
+        meta.setBytesPerChecksum(bytesPerChecksum);
         HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(
-            is, nochecksum), algo, totalSize, HFile.MAX_FORMAT_VERSION, hfs, path);
+            is, nochecksum), totalSize, hfs, path, meta);
         HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
         is.close();
         b.sanityCheck();
@@ -257,9 +288,9 @@ public class TestChecksum {
    * checksum validations.
    */
   static private class FSReaderV2Test extends HFileBlock.FSReaderV2 {
-    public FSReaderV2Test(FSDataInputStreamWrapper istream, Algorithm algo, long fileSize,
-        int minorVersion, FileSystem fs,Path path) throws IOException {
-      super(istream, algo, fileSize, minorVersion, (HFileSystem)fs, path);
+    public FSReaderV2Test(FSDataInputStreamWrapper istream, long fileSize, FileSystem fs,
+        Path path, HFileContext meta) throws IOException {
+      super(istream, fileSize, (HFileSystem) fs, path, meta);
     }
 
     @Override

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java Sat Sep 21 18:01:32 2013
@@ -28,6 +28,7 @@ import java.util.Collection;
 import java.util.List;
 
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,12 +50,13 @@ import org.apache.hadoop.fs.Path;
 public class TestFixedFileTrailer {
 
   private static final Log LOG = LogFactory.getLog(TestFixedFileTrailer.class);
+  private static final int MAX_COMPARATOR_NAME_LENGTH = 128;
 
   /**
    * The number of used fields by version. Indexed by version minus two. 
    * Min version that we support is V2
    */
-  private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 14 };
+  private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 14, 14 };
 
   private HBaseTestingUtility util = new HBaseTestingUtility();
   private FileSystem fs;
@@ -86,7 +88,7 @@ public class TestFixedFileTrailer {
   @Test
   public void testTrailer() throws IOException {
     FixedFileTrailer t = new FixedFileTrailer(version, 
-                           HFileBlock.MINOR_VERSION_NO_CHECKSUM);
+        HFileReaderV2.PBUF_TRAILER_MINOR_VERSION);
     t.setDataIndexCount(3);
     t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
 
@@ -119,7 +121,7 @@ public class TestFixedFileTrailer {
     {
       DataInputStream dis = new DataInputStream(bais);
       FixedFileTrailer t2 = new FixedFileTrailer(version, 
-                              HFileBlock.MINOR_VERSION_NO_CHECKSUM);
+          HFileReaderV2.PBUF_TRAILER_MINOR_VERSION);
       t2.deserialize(dis);
       assertEquals(-1, bais.read()); // Ensure we have read everything.
       checkLoadedTrailer(version, t, t2);
@@ -163,6 +165,68 @@ public class TestFixedFileTrailer {
         trailerStr.split(", ").length);
     assertEquals(trailerStr, t4.toString());
   }
+  
+  @Test
+  public void testTrailerForV2NonPBCompatibility() throws Exception {
+    if (version == 2) {
+      FixedFileTrailer t = new FixedFileTrailer(version,
+          HFileReaderV2.MINOR_VERSION_NO_CHECKSUM);
+      t.setDataIndexCount(3);
+      t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
+      t.setLastDataBlockOffset(291);
+      t.setNumDataIndexLevels(3);
+      t.setComparatorClass(KeyValue.COMPARATOR.getClass());
+      t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
+      t.setUncompressedDataIndexSize(827398717L); // Something random.
+      t.setLoadOnOpenOffset(128);
+      t.setMetaIndexCount(7);
+      t.setTotalUncompressedBytes(129731987);
+
+      {
+        DataOutputStream dos = new DataOutputStream(baos); // Limited scope.
+        serializeAsWritable(dos, t);
+        dos.flush();
+        assertEquals(FixedFileTrailer.getTrailerSize(version), dos.size());
+      }
+
+      byte[] bytes = baos.toByteArray();
+      baos.reset();
+      assertEquals(bytes.length, FixedFileTrailer.getTrailerSize(version));
+
+      ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+      {
+        DataInputStream dis = new DataInputStream(bais);
+        FixedFileTrailer t2 = new FixedFileTrailer(version,
+            HFileReaderV2.MINOR_VERSION_NO_CHECKSUM);
+        t2.deserialize(dis);
+        assertEquals(-1, bais.read()); // Ensure we have read everything.
+        checkLoadedTrailer(version, t, t2);
+      }
+    }
+  }
+
+  // Copied from FixedFileTrailer for testing the reading part of
+  // FixedFileTrailer of non PB
+  // serialized FFTs.
+  private void serializeAsWritable(DataOutputStream output, FixedFileTrailer fft)
+      throws IOException {
+    BlockType.TRAILER.write(output);
+    output.writeLong(fft.getFileInfoOffset());
+    output.writeLong(fft.getLoadOnOpenDataOffset());
+    output.writeInt(fft.getDataIndexCount());
+    output.writeLong(fft.getUncompressedDataIndexSize());
+    output.writeInt(fft.getMetaIndexCount());
+    output.writeLong(fft.getTotalUncompressedBytes());
+    output.writeLong(fft.getEntryCount());
+    output.writeInt(fft.getCompressionCodec().ordinal());
+    output.writeInt(fft.getNumDataIndexLevels());
+    output.writeLong(fft.getFirstDataBlockOffset());
+    output.writeLong(fft.getLastDataBlockOffset());
+    Bytes.writeStringFixedSize(output, fft.getComparatorClassName(), MAX_COMPARATOR_NAME_LENGTH);
+    output.writeInt(FixedFileTrailer.materializeVersion(fft.getMajorVersion(),
+        fft.getMinorVersion()));
+  }
+ 
 
   private FixedFileTrailer readTrailer(Path trailerPath) throws IOException {
     FSDataInputStream fsdis = fs.open(trailerPath);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java Sat Sep 21 18:01:32 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
@@ -82,8 +83,10 @@ public class TestHFile extends HBaseTest
   public void testEmptyHFile() throws IOException {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path f = new Path(ROOT_DIR, getName());
+    HFileContext context = new HFileContext();
+    context.setIncludesTags(false);
     Writer w =
-        HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).create();
+        HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create();
     w.close();
     Reader r = HFile.createReader(fs, f, cacheConf);
     r.loadFileInfo();
@@ -130,8 +133,10 @@ public class TestHFile extends HBaseTest
   public void testCorruptTruncatedHFile() throws IOException {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path f = new Path(ROOT_DIR, getName());
-    Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
-    writeSomeRecords(w, 0, 100);
+    HFileContext  context = new HFileContext();
+    Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
+        .withFileContext(context).create();
+    writeSomeRecords(w, 0, 100, false);
     w.close();
 
     Path trunc = new Path(f.getParent(), "trucated");
@@ -148,12 +153,17 @@ public class TestHFile extends HBaseTest
 
   // write some records into the tfile
   // write them twice
-  private int writeSomeRecords(Writer writer, int start, int n)
+  private int writeSomeRecords(Writer writer, int start, int n, boolean useTags)
       throws IOException {
     String value = "value";
     for (int i = start; i < (start + n); i++) {
       String key = String.format(localFormatter, Integer.valueOf(i));
-      writer.append(Bytes.toBytes(key), Bytes.toBytes(value + key));
+      if (useTags) {
+        Tag t = new Tag((byte) 1, "myTag1");
+        writer.append(Bytes.toBytes(key), Bytes.toBytes(value + key), t.getBuffer());
+      } else {
+        writer.append(Bytes.toBytes(key), Bytes.toBytes(value + key));
+      }
     }
     return (start + n);
   }
@@ -192,8 +202,8 @@ public class TestHFile extends HBaseTest
     return String.format(localFormatter, Integer.valueOf(rowId)).getBytes();
   }
 
-  private void writeRecords(Writer writer) throws IOException {
-    writeSomeRecords(writer, 0, 100);
+  private void writeRecords(Writer writer, boolean useTags) throws IOException {
+    writeSomeRecords(writer, 0, 100, useTags);
     writer.close();
   }
 
@@ -205,20 +215,26 @@ public class TestHFile extends HBaseTest
 
   /**
    * test none codecs
+   * @param useTags 
    */
-  void basicWithSomeCodec(String codec) throws IOException {
+  void basicWithSomeCodec(String codec, boolean useTags) throws IOException {
+    if (useTags) {
+      conf.setInt("hfile.format.version", 3);
+    }
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
-    Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
+    Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString() + useTags);
     FSDataOutputStream fout = createFSOutput(ncTFile);
+    HFileContext meta = new HFileContext();
+    meta.setBlocksize(minBlockSize);
+    meta.setCompressAlgo(AbstractHFileWriter.compressionByName(codec));
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)
-        .withBlockSize(minBlockSize)
-        .withCompression(codec)
+        .withFileContext(meta)
         // NOTE: This test is dependent on this deprecated nonstandard comparator
         .withComparator(new KeyValue.RawBytesComparator())
         .create();
     LOG.info(writer);
-    writeRecords(writer);
+    writeRecords(writer, useTags);
     fout.close();
     FSDataInputStream fin = fs.open(ncTFile);
     Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
@@ -250,8 +266,13 @@ public class TestHFile extends HBaseTest
   }
 
   public void testTFileFeatures() throws IOException {
-    basicWithSomeCodec("none");
-    basicWithSomeCodec("gz");
+    testTFilefeaturesInternals(false);
+    testTFilefeaturesInternals(true);
+  }
+
+  protected void testTFilefeaturesInternals(boolean useTags) throws IOException {
+    basicWithSomeCodec("none", useTags);
+    basicWithSomeCodec("gz", useTags);
   }
 
   private void writeNumMetablocks(Writer writer, int n) {
@@ -292,10 +313,12 @@ public class TestHFile extends HBaseTest
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
+    HFileContext meta = new HFileContext();
+    meta.setCompressAlgo(AbstractHFileWriter.compressionByName(compress));
+    meta.setBlocksize(minBlockSize);
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)
-        .withBlockSize(minBlockSize)
-        .withCompression(compress)
+        .withFileContext(meta)
         .create();
     someTestingWithMetaBlock(writer);
     writer.close();
@@ -324,10 +347,12 @@ public class TestHFile extends HBaseTest
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
       Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
       FSDataOutputStream fout = createFSOutput(mFile);
+      HFileContext meta = new HFileContext();
+      meta.setCompressAlgo((compressAlgo));
+      meta.setBlocksize(minBlockSize);
       Writer writer = HFile.getWriterFactory(conf, cacheConf)
           .withOutputStream(fout)
-          .withBlockSize(minBlockSize)
-          .withCompression(compressAlgo)
+          .withFileContext(meta)
           .create();
       writer.append("foo".getBytes(), "value".getBytes());
       writer.close();
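
The TestHFile hunks above show the file-level side of the same migration: block size and compression move off the writer builder (withBlockSize/withCompression) and into the HFileContext passed via withFileContext, and tagged cells go through a three-argument append that takes the raw tag bytes. Below is a sketch of that write path limited to the calls visible in this patch; the class name is illustrative, it assumes the org.apache.hadoop.hbase.io.hfile package so CacheConfig and HFile.Writer resolve without extra imports, and conf/cacheConf/fs/f stand for the configuration, cache config, filesystem and output path a test would set up.

package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileTagWriteSketch {

  static void writeWithTags(Configuration conf, CacheConfig cacheConf,
      FileSystem fs, Path f) throws IOException {
    // Tags need HFile format version 3; the tests above set this explicitly.
    conf.setInt("hfile.format.version", 3);

    HFileContext meta = new HFileContext();
    meta.setBlocksize(64 * 1024);
    meta.setCompressAlgo(Compression.Algorithm.NONE);

    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, f)
        .withFileContext(meta)
        // raw-byte keys rely on this deprecated comparator, as TestHFile notes
        .withComparator(new KeyValue.RawBytesComparator())
        .create();

    // The third append() argument carries the serialized tag bytes.
    Tag t = new Tag((byte) 1, "myTag1");
    writer.append(Bytes.toBytes("key-000"), Bytes.toBytes("value-000"), t.getBuffer());
    writer.close();
  }
}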

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java Sat Sep 21 18:01:32 2013
@@ -18,7 +18,11 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.GZ;
+import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.NONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
@@ -33,6 +37,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executor;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Executors;
@@ -46,8 +51,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.DoubleOutputStream;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -61,9 +67,6 @@ import org.apache.hadoop.hbase.util.Chec
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.compress.Compressor;
-
-import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.*;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -97,14 +100,15 @@ public class TestHFileBlock {
   private int uncompressedSizeV1;
 
   private final boolean includesMemstoreTS;
-
-  public TestHFileBlock(boolean includesMemstoreTS) {
+  private final boolean includesTag;
+  public TestHFileBlock(boolean includesMemstoreTS, boolean includesTag) {
     this.includesMemstoreTS = includesMemstoreTS;
+    this.includesTag = includesTag;
   }
 
   @Parameters
   public static Collection<Object[]> parameters() {
-    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
+    return HBaseTestingUtility.MEMSTORETS_TAGS_PARAMETRIZED;
   }
 
   @Before
@@ -118,7 +122,7 @@ public class TestHFileBlock {
       dos.writeInt(i / 100);
   }
 
- static int writeTestKeyValues(OutputStream dos, int seed, boolean includesMemstoreTS)
+ static int writeTestKeyValues(OutputStream dos, int seed, boolean includesMemstoreTS, boolean useTag)
       throws IOException {
     List<KeyValue> keyValues = new ArrayList<KeyValue>();
     Random randomizer = new Random(42l + seed); // just any fixed number
@@ -163,24 +167,37 @@ public class TestHFileBlock {
       } else {
         timestamp = randomizer.nextLong();
       }
-
-      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
+      if (!useTag) {
+        keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
+      } else {
+        keyValues.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
+            (byte) 1, Bytes.toBytes("myTagVal")) }));
+      }
     }
 
     // sort it and write to stream
     int totalSize = 0;
-    Collections.sort(keyValues, KeyValue.COMPARATOR);
+   Collections.sort(keyValues, KeyValue.COMPARATOR);
     DataOutputStream dataOutputStream = new DataOutputStream(dos);
+
     for (KeyValue kv : keyValues) {
+      dataOutputStream.writeInt(kv.getKeyLength());
+      dataOutputStream.writeInt(kv.getValueLength());
+      dataOutputStream.write(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+      dataOutputStream.write(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+      // Write the additonal tag into the stream
+      // always write the taglength
       totalSize += kv.getLength();
-      dataOutputStream.write(kv.getBuffer(), kv.getOffset(), kv.getLength());
+      if (useTag) {
+        dataOutputStream.writeShort(kv.getTagsLength());
+        dataOutputStream.write(kv.getBuffer(), kv.getTagsOffset(), kv.getTagsLength());
+      }
       if (includesMemstoreTS) {
         long memstoreTS = randomizer.nextLong();
         WritableUtils.writeVLong(dataOutputStream, memstoreTS);
         totalSize += WritableUtils.getVIntSize(memstoreTS);
       }
     }
-
     return totalSize;
   }
 
@@ -199,11 +216,15 @@ public class TestHFileBlock {
   }
 
   static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
-      boolean includesMemstoreTS) throws IOException {
+      boolean includesMemstoreTS, boolean includesTag) throws IOException {
     final BlockType blockType = BlockType.DATA;
-    HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
-        includesMemstoreTS, HFile.DEFAULT_CHECKSUM_TYPE,
-        HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    HFileContext meta = new HFileContext();
+    meta.setCompressAlgo(algo);
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(includesTag);
+    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
     DataOutputStream dos = hbw.startWriting(blockType);
     writeTestBlockContents(dos);
     dos.flush();
@@ -214,8 +235,8 @@ public class TestHFileBlock {
   }
 
   public String createTestBlockStr(Compression.Algorithm algo,
-      int correctLength) throws IOException {
-    HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS);
+      int correctLength, boolean useTag) throws IOException {
+    HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS, useTag);
     byte[] testV2Block = hbw.getHeaderAndDataForTest();
     int osOffset = HConstants.HFILEBLOCK_HEADER_SIZE + 9;
     if (testV2Block.length == correctLength) {
@@ -231,7 +252,7 @@ public class TestHFileBlock {
 
   @Test
   public void testNoCompression() throws IOException {
-    assertEquals(4000, createTestV2Block(NONE, includesMemstoreTS).
+    assertEquals(4000, createTestV2Block(NONE, includesMemstoreTS, false).
                  getBlockForCaching().getUncompressedSizeWithoutHeader());
   }
 
@@ -257,7 +278,7 @@ public class TestHFileBlock {
             + "\\xD46\\xEA5\\xEA3\\xEA7\\xE7\\x00LI\\x5Cs\\xA0\\x0F\\x00\\x00"
             + "\\x00\\x00\\x00\\x00"; //  4 byte checksum (ignored)
     final int correctGzipBlockLength = 95;
-    final String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength);
+    final String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength, false);
     // We ignore the block checksum because createTestBlockStr can change the
     // gzip header after the block is produced
     assertEquals(correctTestBlockStr.substring(0, correctGzipBlockLength - 4),
@@ -266,6 +287,13 @@ public class TestHFileBlock {
 
   @Test
   public void testReaderV2() throws IOException {
+    testReaderV2Internals();
+  }
+
+  protected void testReaderV2Internals() throws IOException {
+    if(includesTag) {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    }
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
           LOG.info("testReaderV2: Compression algorithm: " + algo +
@@ -273,9 +301,14 @@ public class TestHFileBlock {
         Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
             + algo);
         FSDataOutputStream os = fs.create(path);
-        HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
-            includesMemstoreTS, HFile.DEFAULT_CHECKSUM_TYPE,
-            HFile.DEFAULT_BYTES_PER_CHECKSUM);
+        HFileContext meta = new HFileContext();
+        meta.setCompressAlgo(algo);
+        meta.setIncludesMvcc(includesMemstoreTS);
+        meta.setIncludesTags(includesTag);
+        meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+        meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+        HFileBlock.Writer hbw = new HFileBlock.Writer(null,
+           meta);
         long totalSize = 0;
         for (int blockId = 0; blockId < 2; ++blockId) {
           DataOutputStream dos = hbw.startWriting(BlockType.DATA);
@@ -287,8 +320,12 @@ public class TestHFileBlock {
         os.close();
 
         FSDataInputStream is = fs.open(path);
-        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
-            totalSize);
+        meta = new HFileContext();
+        meta.setUsesHBaseChecksum(true);
+        meta.setIncludesMvcc(includesMemstoreTS);
+        meta.setIncludesTags(includesTag);
+        meta.setCompressAlgo(algo);
+        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
         HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
         is.close();
         assertEquals(0, HFile.getChecksumFailuresCount());
@@ -301,7 +338,7 @@ public class TestHFileBlock {
 
         if (algo == GZ) {
           is = fs.open(path);
-          hbr = new HFileBlock.FSReaderV2(is, algo, totalSize);
+          hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
           b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE +
                                 b.totalChecksumBytes(), -1, pread);
           assertEquals(blockStr, b.toString());
@@ -330,7 +367,14 @@ public class TestHFileBlock {
    */
   @Test
   public void testDataBlockEncoding() throws IOException {
+    testInternals();
+  }
+
+  private void testInternals() throws IOException {
     final int numBlocks = 5;
+    if(includesTag) {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    }
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
         for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
@@ -339,27 +383,35 @@ public class TestHFileBlock {
           FSDataOutputStream os = fs.create(path);
           HFileDataBlockEncoder dataBlockEncoder =
               new HFileDataBlockEncoderImpl(encoding);
-          HFileBlock.Writer hbw = new HFileBlock.Writer(algo, dataBlockEncoder,
-              includesMemstoreTS, HFile.DEFAULT_CHECKSUM_TYPE,
-              HFile.DEFAULT_BYTES_PER_CHECKSUM);
+          HFileContext meta = new HFileContext();
+          meta.setCompressAlgo(algo);
+          meta.setIncludesMvcc(includesMemstoreTS);
+          meta.setIncludesTags(includesTag);
+          meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+          meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+          HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder,
+             meta);
           long totalSize = 0;
           final List<Integer> encodedSizes = new ArrayList<Integer>();
           final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
           for (int blockId = 0; blockId < numBlocks; ++blockId) {
             DataOutputStream dos = hbw.startWriting(BlockType.DATA);
             writeEncodedBlock(algo, encoding, dos, encodedSizes, encodedBlocks,
-                blockId, includesMemstoreTS, HConstants.HFILEBLOCK_DUMMY_HEADER);
+                blockId, includesMemstoreTS, HConstants.HFILEBLOCK_DUMMY_HEADER, includesTag);
             hbw.writeHeaderAndData(os);
             totalSize += hbw.getOnDiskSizeWithHeader();
           }
           os.close();
 
           FSDataInputStream is = fs.open(path);
-          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, algo,
-              totalSize);
+          meta = new HFileContext();
+          meta.setUsesHBaseChecksum(true);
+          meta.setIncludesMvcc(includesMemstoreTS);
+          meta.setIncludesTags(includesTag);
+          meta.setCompressAlgo(algo);
+          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
           hbr.setDataBlockEncoder(dataBlockEncoder);
           hbr.setIncludesMemstoreTS(includesMemstoreTS);
-
           HFileBlock b;
           int pos = 0;
           for (int blockId = 0; blockId < numBlocks; ++blockId) {
@@ -393,28 +445,31 @@ public class TestHFileBlock {
 
   static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
        DataOutputStream dos, final List<Integer> encodedSizes,
-      final List<ByteBuffer> encodedBlocks, int blockId,
-      boolean includesMemstoreTS, byte[] dummyHeader) throws IOException {
+      final List<ByteBuffer> encodedBlocks, int blockId, 
+      boolean includesMemstoreTS, byte[] dummyHeader, boolean useTag) throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     DoubleOutputStream doubleOutputStream =
         new DoubleOutputStream(dos, baos);
-    writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS);
+    writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS, useTag);
     ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
     rawBuf.rewind();
 
     DataBlockEncoder encoder = encoding.getEncoder();
     int headerLen = dummyHeader.length;
     byte[] encodedResultWithHeader = null;
+    HFileContext meta = new HFileContext();
+    meta.setCompressAlgo(algo);
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(useTag);
     if (encoder != null) {
-      HFileBlockEncodingContext encodingCtx =
-          encoder.newDataBlockEncodingContext(algo, encoding, dummyHeader);
-      encoder.encodeKeyValues(rawBuf, includesMemstoreTS,
-          encodingCtx);
+      HFileBlockEncodingContext encodingCtx = encoder.newDataBlockEncodingContext(encoding,
+          dummyHeader, meta);
+      encoder.encodeKeyValues(rawBuf, encodingCtx);
       encodedResultWithHeader =
           encodingCtx.getUncompressedBytesWithHeader();
     } else {
-      HFileBlockDefaultEncodingContext defaultEncodingCtx =
-        new HFileBlockDefaultEncodingContext(algo, encoding, dummyHeader);
+      HFileBlockDefaultEncodingContext defaultEncodingCtx = new HFileBlockDefaultEncodingContext(
+          encoding, dummyHeader, meta);
       byte[] rawBufWithHeader =
           new byte[rawBuf.array().length + headerLen];
       System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
@@ -474,6 +529,10 @@ public class TestHFileBlock {
 
   @Test
   public void testPreviousOffset() throws IOException {
+    testPreviousOffsetInternals();
+  }
+
+  protected void testPreviousOffsetInternals() throws IOException {
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : BOOLEAN_VALUES) {
         for (boolean cacheOnWrite : BOOLEAN_VALUES) {
@@ -491,8 +550,12 @@ public class TestHFileBlock {
               expectedPrevOffsets, expectedTypes, expectedContents);
 
           FSDataInputStream is = fs.open(path);
-          HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
-              totalSize);
+          HFileContext meta = new HFileContext();
+          meta.setUsesHBaseChecksum(true);
+          meta.setIncludesMvcc(includesMemstoreTS);
+          meta.setIncludesTags(includesTag);
+          meta.setCompressAlgo(algo);
+          HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
           long curOffset = 0;
           for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
             if (!pread) {
@@ -656,6 +719,11 @@ public class TestHFileBlock {
 
   @Test
   public void testConcurrentReading() throws Exception {
+    testConcurrentReadingInternals();
+  }
+
+  protected void testConcurrentReadingInternals() throws IOException,
+      InterruptedException, ExecutionException {
     for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
       Path path =
           new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
@@ -665,8 +733,12 @@ public class TestHFileBlock {
       writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
       FSDataInputStream is = fs.open(path);
       long fileSize = fs.getFileStatus(path).getLen();
-      HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, compressAlgo,
-          fileSize);
+      HFileContext meta = new HFileContext();
+      meta.setUsesHBaseChecksum(true);
+      meta.setIncludesMvcc(includesMemstoreTS);
+      meta.setIncludesTags(includesTag);
+      meta.setCompressAlgo(compressAlgo);
+      HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, fileSize, meta);
 
       Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
       ExecutorCompletionService<Boolean> ecs =
@@ -697,9 +769,14 @@ public class TestHFileBlock {
   ) throws IOException {
     boolean cacheOnWrite = expectedContents != null;
     FSDataOutputStream os = fs.create(path);
-    HFileBlock.Writer hbw = new HFileBlock.Writer(compressAlgo, null,
-        includesMemstoreTS, HFile.DEFAULT_CHECKSUM_TYPE,
-        HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(true);
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(includesTag);
+    meta.setCompressAlgo(compressAlgo);
+    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
     Map<BlockType, Long> prevOffsetByType = new HashMap<BlockType, Long>();
     long totalSize = 0;
     for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
@@ -749,6 +826,10 @@ public class TestHFileBlock {
 
   @Test
   public void testBlockHeapSize() {
+    testBlockHeapSizeInternals();
+  }
+
+  protected void testBlockHeapSizeInternals() {
     if (ClassSize.is32BitJVM()) {
       assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 64);
     } else {
@@ -758,16 +839,24 @@ public class TestHFileBlock {
     for (int size : new int[] { 100, 256, 12345 }) {
       byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
       ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
+      HFileContext meta = new HFileContext();
+      meta.setIncludesMvcc(includesMemstoreTS);
+      meta.setIncludesTags(includesTag);
+      meta.setUsesHBaseChecksum(false);
+      meta.setCompressAlgo(Algorithm.NONE);
+      meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+      meta.setChecksumType(ChecksumType.NULL);
+      meta.setBytesPerChecksum(0);
       HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
-          HFileBlock.FILL_HEADER, -1, includesMemstoreTS,
-          HFileBlock.MINOR_VERSION_NO_CHECKSUM, 0, ChecksumType.NULL.getCode(),
-          0);
+          HFileBlock.FILL_HEADER, -1, 
+          0, meta);
       long byteBufferExpectedSize =
           ClassSize.align(ClassSize.estimateBase(buf.getClass(), true)
               + HConstants.HFILEBLOCK_HEADER_SIZE + size);
+      long hfileMetaSize =  ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
       long hfileBlockExpectedSize =
           ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
-      long expected = hfileBlockExpectedSize + byteBufferExpectedSize;
+      long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize;
       assertEquals("Block data size: " + size + ", byte buffer expected " +
           "size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
           "size: " + hfileBlockExpectedSize + ";", expected,

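For readers tracking the API change that runs through the TestHFileBlock hunks above: the
old HFileBlock.Writer and HFileBlock.FSReaderV2 constructors, which took the compression
algorithm, memstore-TS flag and checksum settings as separate arguments, are replaced by
variants that carry a single HFileContext. The fragment below is a minimal sketch of that
write/read round trip and is not part of the patch; the setter and constructor calls are
taken verbatim from the hunks above, while the payload written and the fs/path arguments
are assumed to be supplied by the test fixture.

  // Sketch only: HFileContext-based block write/read, mirroring the calls
  // introduced in TestHFileBlock above. Imports are the same ones the test
  // already uses (org.apache.hadoop.fs.*, org.apache.hadoop.hbase.io.hfile.*).
  static HFileBlock writeAndReadOneBlock(FileSystem fs, Path path,
      Compression.Algorithm algo, boolean includesMemstoreTS,
      boolean includesTag, boolean pread) throws IOException {
    HFileContext meta = new HFileContext();
    meta.setCompressAlgo(algo);
    meta.setIncludesMvcc(includesMemstoreTS);
    meta.setIncludesTags(includesTag);
    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);   // null = no data block encoder

    FSDataOutputStream os = fs.create(path);
    DataOutputStream dos = hbw.startWriting(BlockType.DATA);
    dos.writeInt(1234);                                          // any payload; the tests write KeyValues here
    hbw.writeHeaderAndData(os);
    long totalSize = hbw.getOnDiskSizeWithHeader();
    os.close();

    // The reader side gets its own context; setUsesHBaseChecksum(true) matches
    // the output of the new writer, exactly as in testReaderV2Internals above.
    HFileContext readerMeta = new HFileContext();
    readerMeta.setUsesHBaseChecksum(true);
    readerMeta.setIncludesMvcc(includesMemstoreTS);
    readerMeta.setIncludesTags(includesTag);
    readerMeta.setCompressAlgo(algo);
    FSDataInputStream is = fs.open(path);
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, readerMeta);
    HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
    is.close();
    return b;
  }
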
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java Sat Sep 21 18:01:32 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.compre
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -84,14 +85,16 @@ public class TestHFileBlockCompatibility
   private int uncompressedSizeV1;
 
   private final boolean includesMemstoreTS;
+  private final boolean includesTag;
 
-  public TestHFileBlockCompatibility(boolean includesMemstoreTS) {
+  public TestHFileBlockCompatibility(boolean includesMemstoreTS, boolean includesTag) {
     this.includesMemstoreTS = includesMemstoreTS;
+    this.includesTag = includesTag;
   }
 
   @Parameters
   public static Collection<Object[]> parameters() {
-    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
+    return HBaseTestingUtility.MEMSTORETS_TAGS_PARAMETRIZED;
   }
 
   @Before
@@ -117,7 +120,7 @@ public class TestHFileBlockCompatibility
       throws IOException {
     final BlockType blockType = BlockType.DATA;
     Writer hbw = new Writer(algo, null,
-        includesMemstoreTS);
+        includesMemstoreTS, includesTag);
     DataOutputStream dos = hbw.startWriting(blockType);
     TestHFileBlock.writeTestBlockContents(dos);
     // make sure the block is ready by calling hbw.getHeaderAndData()
@@ -144,7 +147,7 @@ public class TestHFileBlockCompatibility
   @Test
   public void testNoCompression() throws IOException {
     assertEquals(4000, createTestV2Block(NONE).getBlockForCaching().
-                       getUncompressedSizeWithoutHeader());
+        getUncompressedSizeWithoutHeader());
   }
 
   @Test
@@ -172,6 +175,9 @@ public class TestHFileBlockCompatibility
 
   @Test
   public void testReaderV2() throws IOException {
+    if(includesTag) {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    }
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
           LOG.info("testReaderV2: Compression algorithm: " + algo +
@@ -180,7 +186,7 @@ public class TestHFileBlockCompatibility
             + algo);
         FSDataOutputStream os = fs.create(path);
         Writer hbw = new Writer(algo, null,
-            includesMemstoreTS);
+            includesMemstoreTS, includesTag);
         long totalSize = 0;
         for (int blockId = 0; blockId < 2; ++blockId) {
           DataOutputStream dos = hbw.startWriting(BlockType.DATA);
@@ -192,8 +198,13 @@ public class TestHFileBlockCompatibility
         os.close();
 
         FSDataInputStream is = fs.open(path);
+        HFileContext meta = new HFileContext();
+        meta.setUsesHBaseChecksum(false);
+        meta.setIncludesMvcc(includesMemstoreTS);
+        meta.setIncludesTags(includesTag);
+        meta.setCompressAlgo(algo);
         HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
-            algo, totalSize, MINOR_VERSION, fs, path);
+            totalSize, fs, path, meta);
         HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
         is.close();
 
@@ -205,8 +216,8 @@ public class TestHFileBlockCompatibility
 
         if (algo == GZ) {
           is = fs.open(path);
-          hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
-              algo, totalSize, MINOR_VERSION, fs, path);
+          hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is), totalSize, fs, path,
+              meta);
           b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
                                 b.totalChecksumBytes(), -1, pread);
           assertEquals(blockStr, b.toString());
@@ -235,6 +246,9 @@ public class TestHFileBlockCompatibility
    */
   @Test
   public void testDataBlockEncoding() throws IOException {
+    if(includesTag) {
+      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    }
     final int numBlocks = 5;
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
@@ -250,7 +264,7 @@ public class TestHFileBlockCompatibility
                   TestHFileBlockCompatibility.Writer.DUMMY_HEADER);
           TestHFileBlockCompatibility.Writer hbw =
               new TestHFileBlockCompatibility.Writer(algo,
-                  dataBlockEncoder, includesMemstoreTS);
+                  dataBlockEncoder, includesMemstoreTS, includesTag);
           long totalSize = 0;
           final List<Integer> encodedSizes = new ArrayList<Integer>();
           final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
@@ -258,7 +272,7 @@ public class TestHFileBlockCompatibility
             DataOutputStream dos = hbw.startWriting(BlockType.DATA);
             TestHFileBlock.writeEncodedBlock(algo, encoding, dos, encodedSizes,
                 encodedBlocks, blockId, includesMemstoreTS,
-                TestHFileBlockCompatibility.Writer.DUMMY_HEADER);
+                TestHFileBlockCompatibility.Writer.DUMMY_HEADER, includesTag);
 
             hbw.writeHeaderAndData(os);
             totalSize += hbw.getOnDiskSizeWithHeader();
@@ -266,8 +280,13 @@ public class TestHFileBlockCompatibility
           os.close();
 
           FSDataInputStream is = fs.open(path);
+          HFileContext meta = new HFileContext();
+          meta.setUsesHBaseChecksum(false);
+          meta.setIncludesMvcc(includesMemstoreTS);
+          meta.setIncludesTags(includesTag);
+          meta.setCompressAlgo(algo);
           HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
-              algo, totalSize, MINOR_VERSION, fs, path);
+              totalSize, fs, path, meta);
           hbr.setDataBlockEncoder(dataBlockEncoder);
           hbr.setIncludesMemstoreTS(includesMemstoreTS);
 
@@ -301,9 +320,6 @@ public class TestHFileBlockCompatibility
       }
     }
   }
-
-
-
   /**
    * This is the version of the HFileBlock.Writer that is used to
    * create V2 blocks with minor version 0. These blocks do not
@@ -392,33 +408,34 @@ public class TestHFileBlockCompatibility
     /** The offset of the previous block of the same type */
     private long prevOffset;
 
-    /** Whether we are including memstore timestamp after every key/value */
-    private boolean includesMemstoreTS;
+    private HFileContext meta;
 
     /**
      * @param compressionAlgorithm compression algorithm to use
      * @param dataBlockEncoderAlgo data block encoding algorithm to use
      */
     public Writer(Compression.Algorithm compressionAlgorithm,
-          HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS) {
+          HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS, boolean includesTag) {
       compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
       this.dataBlockEncoder = dataBlockEncoder != null
           ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
 
-      defaultBlockEncodingCtx =
-          new HFileBlockDefaultEncodingContext(compressionAlgorithm,
-              null, DUMMY_HEADER);
+      meta = new HFileContext();
+      meta.setUsesHBaseChecksum(false);
+      meta.setIncludesMvcc(includesMemstoreTS);
+      meta.setIncludesTags(includesTag);
+      meta.setCompressAlgo(compressionAlgorithm);
+      
+      defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
       dataBlockEncodingCtx =
-        this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
-            compressionAlgorithm, DUMMY_HEADER);
-
+          this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
+              DUMMY_HEADER, meta);
       baosInMemory = new ByteArrayOutputStream();
 
       prevOffsetByType = new long[BlockType.values().length];
       for (int i = 0; i < prevOffsetByType.length; ++i)
         prevOffsetByType[i] = -1;
 
-      this.includesMemstoreTS = includesMemstoreTS;
     }
 
     /**
@@ -521,8 +538,7 @@ public class TestHFileBlockCompatibility
               uncompressedBytesWithHeader.length - HEADER_SIZE).slice();
 
       //do the encoding
-      dataBlockEncoder.beforeWriteToDisk(rawKeyValues,
-              includesMemstoreTS, dataBlockEncodingCtx, blockType);
+      dataBlockEncoder.beforeWriteToDisk(rawKeyValues, dataBlockEncodingCtx, blockType);
 
       uncompressedBytesWithHeader =
           dataBlockEncodingCtx.getUncompressedBytesWithHeader();
@@ -714,11 +730,13 @@ public class TestHFileBlockCompatibility
      * Creates a new HFileBlock.
      */
     public HFileBlock getBlockForCaching() {
+      meta.setUsesHBaseChecksum(false);
+      meta.setChecksumType(ChecksumType.NULL);
+      meta.setBytesPerChecksum(0);
       return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
           getUncompressedSizeWithoutHeader(), prevOffset,
-          getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,
-          includesMemstoreTS, MINOR_VERSION, 0, ChecksumType.NULL.getCode(),
-          getOnDiskSizeWithoutHeader());
+          getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset, 
+          getOnDiskSizeWithoutHeader(), meta);
     }
   }
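
The compatibility test above switches its parameter source from BOOLEAN_PARAMETERIZED to
HBaseTestingUtility.MEMSTORETS_TAGS_PARAMETRIZED, so that each run receives both an
includesMemstoreTS and an includesTag flag. That constant's contents are not shown in this
patch; the sketch below is only an assumed equivalent of such a two-flag matrix, written
against the new two-argument constructor.

  // Sketch only: a two-boolean JUnit parameter matrix of the kind the new
  // TestHFileBlockCompatibility(boolean, boolean) constructor expects. The
  // real list lives in HBaseTestingUtility.MEMSTORETS_TAGS_PARAMETRIZED.
  @Parameters
  public static Collection<Object[]> parameters() {
    List<Object[]> params = new ArrayList<Object[]>();
    for (boolean includesMemstoreTS : new boolean[] { false, true }) {
      for (boolean includesTag : new boolean[] { false, true }) {
        params.add(new Object[] { includesMemstoreTS, includesTag });
      }
    }
    return params;
  }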
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java Sat Sep 21 18:01:32 2013
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -118,9 +119,27 @@ public class TestHFileBlockIndex {
 
   @Test
   public void testBlockIndex() throws IOException {
-    path = new Path(TEST_UTIL.getDataTestDir(), "block_index_" + compr);
-    writeWholeIndex();
-    readIndex();
+    testBlockIndexInternals(false);
+    clear();
+    testBlockIndexInternals(true);
+  }
+
+  private void clear() throws IOException {
+    keys.clear();
+    rand = new Random(2389757);
+    firstKeyInFile = null;
+    conf = TEST_UTIL.getConfiguration();
+
+    // This test requires at least HFile format version 2.
+    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
+
+    fs = HFileSystem.get(conf);
+  }
+
+  protected void testBlockIndexInternals(boolean useTags) throws IOException {
+    path = new Path(TEST_UTIL.getDataTestDir(), "block_index_" + compr + useTags);
+    writeWholeIndex(useTags);
+    readIndex(useTags);
   }
 
   /**
@@ -164,13 +183,18 @@ public class TestHFileBlockIndex {
     }
   }
 
-  public void readIndex() throws IOException {
+  public void readIndex(boolean useTags) throws IOException {
     long fileSize = fs.getFileStatus(path).getLen();
     LOG.info("Size of " + path + ": " + fileSize);
 
     FSDataInputStream istream = fs.open(path);
-    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
-        compr, fs.getFileStatus(path).getLen());
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(true);
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(useTags);
+    meta.setCompressAlgo(compr);
+    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream, fs.getFileStatus(path)
+        .getLen(), meta);
 
     BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
     HFileBlockIndex.BlockIndexReader indexReader =
@@ -215,11 +239,17 @@ public class TestHFileBlockIndex {
     istream.close();
   }
 
-  private void writeWholeIndex() throws IOException {
+  private void writeWholeIndex(boolean useTags) throws IOException {
     assertEquals(0, keys.size());
-    HFileBlock.Writer hbw = new HFileBlock.Writer(compr, null,
-        includesMemstoreTS, HFile.DEFAULT_CHECKSUM_TYPE,
-        HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(true);
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(useTags);
+    meta.setCompressAlgo(compr);
+    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    HFileBlock.Writer hbw = new HFileBlock.Writer(null,
+        meta);
     FSDataOutputStream outputStream = fs.create(path);
     HFileBlockIndex.BlockIndexWriter biw =
         new HFileBlockIndex.BlockIndexWriter(hbw, null, null);
@@ -486,11 +516,13 @@ public class TestHFileBlockIndex {
 
       // Write the HFile
       {
+        HFileContext meta = new HFileContext();
+        meta.setBlocksize(SMALL_BLOCK_SIZE);
+        meta.setCompressAlgo(compr);
         HFile.Writer writer =
             HFile.getWriterFactory(conf, cacheConf)
                 .withPath(fs, hfilePath)
-                .withBlockSize(SMALL_BLOCK_SIZE)
-                .withCompression(compr)
+                .withFileContext(meta)
                 .create();
         Random rand = new Random(19231737);
 
@@ -502,7 +534,7 @@ public class TestHFileBlockIndex {
               row, 0, 0).getKey();
 
           byte[] v = TestHFileWriterV2.randomValue(rand);
-          writer.append(k, v);
+          writer.append(k, v, HConstants.EMPTY_BYTE_ARRAY);
           keys[i] = k;
           values[i] = v;
           keyStrSet.add(Bytes.toStringBinary(k));
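
The block-index test also picks up the writer-factory side of the change: block size and
compression now travel inside the HFileContext handed to withFileContext(), and
HFile.Writer.append() takes an explicit tags array. A minimal sketch under those
signatures follows; it is not part of the patch, conf/cacheConf/fs and the output path are
assumed to come from the test, and the key/value pair is purely illustrative.

  // Sketch only: one append through the new withFileContext() factory path,
  // with an empty tags array as in the hunk above.
  static void writeOneKeyValue(Configuration conf, CacheConfig cacheConf,
      FileSystem fs, Path hfilePath, Compression.Algorithm compr,
      int blockSize) throws IOException {
    HFileContext meta = new HFileContext();
    meta.setBlocksize(blockSize);
    meta.setCompressAlgo(compr);
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, hfilePath)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue(Bytes.toBytes("row-00"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("value"));
    writer.append(kv.getKey(), kv.getValue(), HConstants.EMPTY_BYTE_ARRAY);  // tags left empty
    writer.close();
  }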

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java?rev=1525269&r1=1525268&r2=1525269&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java Sat Sep 21 18:01:32 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
@@ -80,7 +81,12 @@ public class TestHFileDataBlockEncoder {
    */
   @Test
   public void testEncodingWithCache() {
-    HFileBlock block = getSampleHFileBlock();
+    testEncodingWithCacheInternals(false);
+    testEncodingWithCacheInternals(true);
+  }
+
+  private void testEncodingWithCacheInternals(boolean useTag) {
+    HFileBlock block = getSampleHFileBlock(useTag);
     LruBlockCache blockCache =
         new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
     HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
@@ -107,36 +113,47 @@ public class TestHFileDataBlockEncoder {
   /** Test for HBASE-5746. */
   @Test
   public void testHeaderSizeInCacheWithoutChecksum() throws Exception {
+    testHeaderSizeInCacheWithoutChecksumInternals(false);
+    testHeaderSizeInCacheWithoutChecksumInternals(true);
+  }
+
+  private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
     int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
     // Create some KVs and create the block with old-style header.
     ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(
-        generator.generateTestKeyValues(60), includesMemstoreTS);
+        generator.generateTestKeyValues(60, useTags), includesMemstoreTS);
     int size = keyValues.limit();
     ByteBuffer buf = ByteBuffer.allocate(size + headerSize);
     buf.position(headerSize);
     keyValues.rewind();
     buf.put(keyValues);
+    HFileContext meta = new HFileContext();
+    meta.setUsesHBaseChecksum(false);
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(useTags);
+    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    meta.setBlocksize(0);
+    meta.setChecksumType(ChecksumType.NULL);
     HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
-        HFileBlock.FILL_HEADER, 0, includesMemstoreTS,
-        HFileBlock.MINOR_VERSION_NO_CHECKSUM, 0, ChecksumType.NULL.getCode(), 0);
-    HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(createBlockOnDisk(block), false);
+        HFileBlock.FILL_HEADER, 0,
+        0, meta);
+    HFileBlock cacheBlock = blockEncoder
+        .diskToCacheFormat(createBlockOnDisk(block, useTags), false);
     assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
   }
 
-  private HFileBlock createBlockOnDisk(HFileBlock block) throws IOException {
+  private HFileBlock createBlockOnDisk(HFileBlock block, boolean useTags) throws IOException {
     int size;
     HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
-        Compression.Algorithm.NONE, blockEncoder.getEncodingOnDisk(),
-        HConstants.HFILEBLOCK_DUMMY_HEADER);
+        blockEncoder.getEncodingOnDisk(),
+        HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
     context.setDummyHeader(block.getDummyHeaderForVersion());
-    blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(),
-            includesMemstoreTS, context, block.getBlockType());
+    blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context, block.getBlockType());
     byte[] encodedBytes = context.getUncompressedBytesWithHeader();
     size = encodedBytes.length - block.getDummyHeaderForVersion().length;
     return new HFileBlock(context.getBlockType(), size, size, -1,
-            ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0, includesMemstoreTS,
-            block.getMinorVersion(), block.getBytesPerChecksum(), block.getChecksumType(),
-            block.getOnDiskDataSizeWithHeader());
+            ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0,
+            block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
   }
 
   /**
@@ -145,9 +162,14 @@ public class TestHFileDataBlockEncoder {
    */
   @Test
   public void testEncodingWritePath() throws IOException {
+    testEncodingWritePathInternals(false);
+    testEncodingWritePathInternals(true);
+  }
+
+  private void testEncodingWritePathInternals(boolean useTag) throws IOException {
     // usually we have just block without headers, but don't complicate that
-    HFileBlock block = getSampleHFileBlock();
-    HFileBlock blockOnDisk = createBlockOnDisk(block);
+    HFileBlock block = getSampleHFileBlock(useTag);
+    HFileBlock blockOnDisk = createBlockOnDisk(block, useTag);
 
     if (blockEncoder.getEncodingOnDisk() !=
         DataBlockEncoding.NONE) {
@@ -164,21 +186,33 @@ public class TestHFileDataBlockEncoder {
    */
   @Test
   public void testEncodingReadPath() {
-    HFileBlock origBlock = getSampleHFileBlock();
+    testEncodingReadPathInternals(false);
+    testEncodingReadPathInternals(true);
+  }
+
+  private void testEncodingReadPathInternals(boolean useTag) {
+    HFileBlock origBlock = getSampleHFileBlock(useTag);
     blockEncoder.diskToCacheFormat(origBlock, false);
   }
 
-  private HFileBlock getSampleHFileBlock() {
+  private HFileBlock getSampleHFileBlock(boolean useTag) {
     ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(
-        generator.generateTestKeyValues(60), includesMemstoreTS);
+        generator.generateTestKeyValues(60, useTag), includesMemstoreTS);
     int size = keyValues.limit();
     ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
     buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
     keyValues.rewind();
     buf.put(keyValues);
+    HFileContext meta = new HFileContext();
+    meta.setIncludesMvcc(includesMemstoreTS);
+    meta.setIncludesTags(useTag);
+    meta.setUsesHBaseChecksum(true);
+    meta.setCompressAlgo(Algorithm.NONE);
+    meta.setBlocksize(0);
+    meta.setChecksumType(ChecksumType.NULL);
     HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
-        HFileBlock.FILL_HEADER, 0, includesMemstoreTS, 
-        HFileReaderV2.MAX_MINOR_VERSION, 0, ChecksumType.NULL.getCode(), 0);
+        HFileBlock.FILL_HEADER, 0, 
+         0, meta);
     return b;
   }
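
Across all four test files the long HFileBlock constructor loses its inline
includesMemstoreTS, minor-version and checksum arguments in favour of a trailing
HFileContext. Below is a hedged sketch of building an uncompressed in-memory block the way
getSampleHFileBlock(boolean) now does; the buffer is assumed to already hold header space
plus serialized KeyValues, and only calls visible in the hunks above are used.

  // Sketch only: the new context-carrying HFileBlock constructor. Argument order:
  // (type, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset,
  //  buf, fillHeader, offset, onDiskDataSizeWithHeader, fileContext)
  static HFileBlock sampleBlock(ByteBuffer buf, int size,
      boolean includesMemstoreTS, boolean includesTag) {
    HFileContext meta = new HFileContext();
    meta.setIncludesMvcc(includesMemstoreTS);
    meta.setIncludesTags(includesTag);
    meta.setUsesHBaseChecksum(true);
    meta.setCompressAlgo(Compression.Algorithm.NONE);
    meta.setBlocksize(0);
    meta.setChecksumType(ChecksumType.NULL);
    return new HFileBlock(BlockType.DATA, size, size, -1, buf,
        HFileBlock.FILL_HEADER, 0, 0, meta);
  }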
 


