hbase-commits mailing list archives

From ramkris...@apache.org
Subject [1/2] hbase git commit: HBASE-13451 - Make the HFileBlockIndex blockKeys to Cells so that it could be easy to use in the CellComparators (Ram)
Date Tue, 09 Jun 2015 06:33:46 GMT
Repository: hbase
Updated Branches:
  refs/heads/master c62b396f9 -> 487e4aa74


http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java
deleted file mode 100644
index 93c6a7b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.Queue;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.io.hfile.BlockType;
-import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
-import org.apache.hadoop.hbase.io.hfile.InlineBlockWriter;
-import org.apache.hadoop.io.Writable;
-
-/**
- * Adds methods required for writing a compound Bloom filter to the data
- * section of an {@link org.apache.hadoop.hbase.io.hfile.HFile} to the
- * {@link CompoundBloomFilter} class.
- */
-@InterfaceAudience.Private
-public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
-    implements BloomFilterWriter, InlineBlockWriter {
-
-  private static final Log LOG =
-    LogFactory.getLog(CompoundBloomFilterWriter.class);
-
-  /** The current chunk being written to */
-  private BloomFilterChunk chunk;
-
-  /** Previous chunk, so that we can create another similar chunk */
-  private BloomFilterChunk prevChunk;
-
-  /** Maximum fold factor */
-  private int maxFold;
-
-  /** The size of individual Bloom filter chunks to create */
-  private int chunkByteSize;
-
-  /** A Bloom filter chunk enqueued for writing */
-  private static class ReadyChunk {
-    int chunkId;
-    byte[] firstKey;
-    BloomFilterChunk chunk;
-  }
-
-  private Queue<ReadyChunk> readyChunks = new LinkedList<ReadyChunk>();
-
-  /** The first key in the current Bloom filter chunk. */
-  private byte[] firstKeyInChunk = null;
-
-  private HFileBlockIndex.BlockIndexWriter bloomBlockIndexWriter =
-      new HFileBlockIndex.BlockIndexWriter();
-
-  /** Whether to cache-on-write compound Bloom filter chunks */
-  private boolean cacheOnWrite;
-
-  /**
-   * @param chunkByteSizeHint
-   *          each chunk's size in bytes. The real chunk size might be different
-   *          as required by the fold factor.
-   * @param errorRate
-   *          target false positive rate
-   * @param hashType
-   *          hash function type to use
-   * @param maxFold
-   *          maximum degree of folding allowed
-   */
-  public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate,
-      int hashType, int maxFold, boolean cacheOnWrite,
-      CellComparator comparator) {
-    chunkByteSize = BloomFilterUtil.computeFoldableByteSize(
-        chunkByteSizeHint * 8L, maxFold);
-
-    this.errorRate = errorRate;
-    this.hashType = hashType;
-    this.maxFold = maxFold;
-    this.cacheOnWrite = cacheOnWrite;
-    this.comparator = comparator;
-  }
-
-  @Override
-  public boolean shouldWriteBlock(boolean closing) {
-    enqueueReadyChunk(closing);
-    return !readyChunks.isEmpty();
-  }
-
-  /**
-   * Enqueue the current chunk if it is ready to be written out.
-   *
-   * @param closing true if we are closing the file, so we do not expect new
-   *        keys to show up
-   */
-  private void enqueueReadyChunk(boolean closing) {
-    if (chunk == null ||
-        (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) {
-      return;
-    }
-
-    if (firstKeyInChunk == null) {
-      throw new NullPointerException("Trying to enqueue a chunk, " +
-          "but first key is null: closing=" + closing + ", keyCount=" +
-          chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys());
-    }
-
-    ReadyChunk readyChunk = new ReadyChunk();
-    readyChunk.chunkId = numChunks - 1;
-    readyChunk.chunk = chunk;
-    readyChunk.firstKey = firstKeyInChunk;
-    readyChunks.add(readyChunk);
-
-    long prevMaxKeys = chunk.getMaxKeys();
-    long prevByteSize = chunk.getByteSize();
-
-    chunk.compactBloom();
-
-    if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) {
-      LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from ["
-          + prevMaxKeys + " max keys, " + prevByteSize + " bytes] to ["
-          + chunk.getMaxKeys() + " max keys, " + chunk.getByteSize()
-          + " bytes]");
-    }
-
-    totalMaxKeys += chunk.getMaxKeys();
-    totalByteSize += chunk.getByteSize();
-
-    firstKeyInChunk = null;
-    prevChunk = chunk;
-    chunk = null;
-  }
-
-  /**
-   * Adds a Bloom filter key. This key must be greater than the previous key,
-   * as defined by the comparator this compound Bloom filter is configured
-   * with. For efficiency, key monotonicity is not checked here. See
-   * {@link org.apache.hadoop.hbase.regionserver.StoreFile.Writer#append(
-   * org.apache.hadoop.hbase.Cell)} for the details of deduplication.
-   */
-  @Override
-  public void add(byte[] bloomKey, int keyOffset, int keyLength) {
-    if (bloomKey == null)
-      throw new NullPointerException();
-
-    enqueueReadyChunk(false);
-
-    if (chunk == null) {
-      if (firstKeyInChunk != null) {
-        throw new IllegalStateException("First key in chunk already set: "
-            + Bytes.toStringBinary(firstKeyInChunk));
-      }
-      firstKeyInChunk = Arrays.copyOfRange(bloomKey, keyOffset, keyOffset
-          + keyLength);
-
-      if (prevChunk == null) {
-        // First chunk
-        chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate,
-            hashType, maxFold);
-      } else {
-        // Use the same parameters as the last chunk, but a new array and
-        // a zero key count.
-        chunk = prevChunk.createAnother();
-      }
-
-      if (chunk.getKeyCount() != 0) {
-        throw new IllegalStateException("keyCount=" + chunk.getKeyCount()
-            + " > 0");
-      }
-
-      chunk.allocBloom();
-      ++numChunks;
-    }
-
-    chunk.add(bloomKey, keyOffset, keyLength);
-    ++totalKeyCount;
-  }
-
-  @Override
-  public void writeInlineBlock(DataOutput out) throws IOException {
-    // We don't remove the chunk from the queue here, because we might need it
-    // again for cache-on-write.
-    ReadyChunk readyChunk = readyChunks.peek();
-
-    BloomFilterChunk readyChunkBloom = readyChunk.chunk;
-    readyChunkBloom.writeBloom(out);
-  }
-
-  @Override
-  public void blockWritten(long offset, int onDiskSize, int uncompressedSize) {
-    ReadyChunk readyChunk = readyChunks.remove();
-    bloomBlockIndexWriter.addEntry(readyChunk.firstKey, offset, onDiskSize);
-  }
-
-  @Override
-  public BlockType getInlineBlockType() {
-    return BlockType.BLOOM_CHUNK;
-  }
-
-  private class MetaWriter implements Writable {
-    protected MetaWriter() {}
-
-    @Override
-    public void readFields(DataInput in) throws IOException {
-      throw new IOException("Cant read with this class.");
-    }
-
-    /**
-     * This is modeled after {@link BloomFilterChunk.MetaWriter} for simplicity,
-     * although the two metadata formats do not have to be consistent. This
-     * does have to be consistent with how {@link
-     * CompoundBloomFilter#CompoundBloomFilter(DataInput,
-     * org.apache.hadoop.hbase.io.hfile.HFile.Reader)} reads fields.
-     */
-    @Override
-    public void write(DataOutput out) throws IOException {
-      out.writeInt(VERSION);
-
-      out.writeLong(getByteSize());
-      out.writeInt(prevChunk.getHashCount());
-      out.writeInt(prevChunk.getHashType());
-      out.writeLong(getKeyCount());
-      out.writeLong(getMaxKeys());
-
-      // Fields that don't have equivalents in ByteBloomFilter.
-      out.writeInt(numChunks);
-      if (comparator != null) {
-        Bytes.writeByteArray(out, Bytes.toBytes(comparator.getClass().getName()));
-      } else {
-        // Internally writes a 0 vint if the byte[] is null
-        Bytes.writeByteArray(out, null);
-      }
-
-      // Write a single-level index without compression or block header.
-      bloomBlockIndexWriter.writeSingleLevelIndex(out, "Bloom filter");
-    }
-  }
-
-  @Override
-  public void compactBloom() {
-  }
-
-  @Override
-  public Writable getMetaWriter() {
-    return new MetaWriter();
-  }
-
-  @Override
-  public Writable getDataWriter() {
-    return null;
-  }
-
-  @Override
-  public boolean getCacheOnWrite() {
-    return cacheOnWrite;
-  }
-}
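
Note: CompoundBloomFilterWriter is only being deleted from org.apache.hadoop.hbase.util here; the TestCompoundBloomFilter hunk further down shows callers now importing it, together with CompoundBloomFilter, from the io.hfile package (the relocated sources are presumably in the companion message of this two-part push):

    import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter;
    import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter;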

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 0682786..3e164ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -776,8 +776,8 @@ public class HBaseFsck extends Configured implements Closeable {
                   getConf()), getConf());
               if ((reader.getFirstKey() != null)
                   && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
-                      reader.getFirstKey()) > 0))) {
-                storeFirstKey = reader.getFirstKey();
+                      ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey()).getKey()) > 0))) {
+                storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey()).getKey();
               }
               if ((reader.getLastKey() != null)
                   && ((storeLastKey == null) || (comparator.compare(storeLastKey,
@@ -790,7 +790,7 @@ public class HBaseFsck extends Configured implements Closeable {
         }
         currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
         currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
-        currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
+        currentRegionBoundariesInformation.storesFirstKey = storeFirstKey;
         currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
         if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
           currentRegionBoundariesInformation.metaFirstKey = null;
@@ -879,7 +879,7 @@ public class HBaseFsck extends Configured implements Closeable {
           CacheConfig cacheConf = new CacheConfig(getConf());
           hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf());
           hf.loadFileInfo();
-          KeyValue startKv = KeyValueUtil.createKeyValueFromKey(hf.getFirstKey());
+          Cell startKv = hf.getFirstKey();
           start = startKv.getRow();
           KeyValue endKv = KeyValueUtil.createKeyValueFromKey(hf.getLastKey());
           end = endKv.getRow();
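
As context for the hunk above: HFile.Reader#getFirstKey() now returns a Cell rather than a flat byte[], so call sites that still compare raw key bytes go through the KeyValue.KeyOnlyKeyValue view. A minimal sketch of that pattern (the helper class is illustrative only, not part of the patch):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    // Illustrative helper, not part of the patch: pull the flat key bytes out of
    // the Cell returned by getFirstKey() for use with a byte[] comparator.
    final class FirstKeyBytesExample {
      static byte[] firstKeyBytes(HFile.Reader reader) {
        Cell first = reader.getFirstKey();                  // a Cell after this change, byte[] before
        return first == null
            ? null
            : ((KeyValue.KeyOnlyKeyValue) first).getKey();  // same cast HBaseFsck uses above
      }
    }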

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
index 7e2f1c0..9b99502 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
@@ -101,9 +101,8 @@ public class TestHalfStoreFileReader {
 
     HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
     r.loadFileInfo();
-    byte [] midkey = r.midkey();
-    KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey);
-    midkey = midKV.getRow();
+    Cell midKV = r.midkey();
+    byte[] midkey = ((KeyValue.KeyOnlyKeyValue)midKV).getRow();
 
     //System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));
 
@@ -167,9 +166,8 @@ public class TestHalfStoreFileReader {
 
       HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
       r.loadFileInfo();
-      byte[] midkey = r.midkey();
-      KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey);
-      midkey = midKV.getRow();
+      Cell midKV = r.midkey();
+      byte[] midkey = ((KeyValue.KeyOnlyKeyValue)midKV).getRow();
 
       Reference bottom = new Reference(midkey, Reference.Range.bottom);
       Reference top = new Reference(midkey, Reference.Range.top);
@@ -217,7 +215,7 @@ public class TestHalfStoreFileReader {
       assertNull(foundKeyValue);
     }
 
-  private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, KeyValue seekBefore,
+  private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell seekBefore,
                                         CacheConfig cacheConfig)
             throws IOException {
       final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 8891a6a..a657c21 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -205,7 +205,7 @@ public class TestHFileBlockIndex {
 
     BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
     HFileBlockIndex.BlockIndexReader indexReader =
-        new HFileBlockIndex.BlockIndexReader(
+        new HFileBlockIndex.CellBasedKeyBlockIndexReader(
             CellComparator.COMPARATOR, numLevels, brw);
 
     indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
@@ -493,16 +493,17 @@ public class TestHFileBlockIndex {
     long expected = ClassSize.estimateBase(cl, false);
 
     HFileBlockIndex.BlockIndexReader bi =
-        new HFileBlockIndex.BlockIndexReader(null, 1);
+        new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
     long actual = bi.heapSize();
 
     // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
     // int [] blockDataSizes) are all null they are not going to show up in the
     // HeapSize calculation, so need to remove those array costs from expected.
-    expected -= ClassSize.align(3 * ClassSize.ARRAY);
+    // Already the block keys are not there in this case
+    expected -= ClassSize.align(2 * ClassSize.ARRAY);
 
     if (expected != actual) {
-      ClassSize.estimateBase(cl, true);
+      expected = ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
   }
@@ -574,7 +575,7 @@ public class TestHFileBlockIndex {
       assertEquals(expectedNumLevels,
           reader.getTrailer().getNumDataIndexLevels());
 
-      assertTrue(Bytes.equals(keys[0], reader.getFirstKey()));
+      assertTrue(Bytes.equals(keys[0], ((KeyValue)reader.getFirstKey()).getKey()));
       assertTrue(Bytes.equals(keys[NUM_KV - 1], reader.getLastKey()));
       LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1]));
 
@@ -631,7 +632,7 @@ public class TestHFileBlockIndex {
       // Validate the mid-key.
       assertEquals(
           Bytes.toStringBinary(blockKeys.get((blockKeys.size() - 1) / 2)),
-          Bytes.toStringBinary(reader.midkey()));
+          reader.midkey());
 
       assertEquals(UNCOMPRESSED_INDEX_SIZES[testI],
           reader.getTrailer().getUncompressedDataIndexSize());
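
The constructor changes in this test reflect the split of BlockIndexReader into two concrete readers: CellBasedKeyBlockIndexReader for the data block index (Cell keys, needs a CellComparator) and ByteArrayKeyBlockIndexReader for the meta block index (raw byte[] keys). A minimal construction sketch, assuming a loaded FixedFileTrailer as in the tests touched by this patch:

    import org.apache.hadoop.hbase.CellComparator;
    import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
    import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;

    // Sketch only: mirrors the constructor calls introduced by this patch.
    final class BlockIndexReaderExample {
      static HFileBlockIndex.BlockIndexReader dataIndexReader(FixedFileTrailer trailer) {
        // Data block index keys are Cells now, so a CellComparator is required.
        return new HFileBlockIndex.CellBasedKeyBlockIndexReader(
            CellComparator.COMPARATOR, trailer.getNumDataIndexLevels());
      }
      static HFileBlockIndex.BlockIndexReader metaIndexReader() {
        // Meta block index keys stay raw byte[]; single-level, as in the tests here.
        return new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
      }
    }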

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
index b19ec0d..d3153fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
@@ -187,7 +187,7 @@ public class TestHFileSeek extends TestCase {
         fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
     reader.loadFileInfo();
     KeySampler kSampler =
-        new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
+        new KeySampler(rng, ((KeyValue)reader.getFirstKey()).getKey(), reader.getLastKey(),
             keyLenGen);
     HFileScanner scanner = reader.getScanner(false, USE_PREAD);
     BytesWritable key = new BytesWritable();

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
index 232d1f7..883f60e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
@@ -148,11 +149,10 @@ public class TestHFileWriterV2 {
     // Comparator class name is stored in the trailer in version 2.
     CellComparator comparator = trailer.createComparator();
     HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
-        new HFileBlockIndex.BlockIndexReader(comparator,
+        new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator,
             trailer.getNumDataIndexLevels());
     HFileBlockIndex.BlockIndexReader metaBlockIndexReader =
-        new HFileBlockIndex.BlockIndexReader(
-            null, 1);
+        new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
 
     HFileBlock.BlockIterator blockIter = blockReader.blockRange(
         trailer.getLoadOnOpenDataOffset(),
@@ -164,7 +164,7 @@ public class TestHFileWriterV2 {
         trailer.getDataIndexCount());
     
     if (findMidKey) {
-      byte[] midkey = dataBlockIndexReader.midkey();
+      Cell midkey = dataBlockIndexReader.midkey();
       assertNotNull("Midkey should not be null", midkey);
     }
     

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
index 37f83b1..e9ba089 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -179,11 +180,10 @@ public class TestHFileWriterV3 {
  // Comparator class name is stored in the trailer in version 2.
     CellComparator comparator = trailer.createComparator();
     HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
-        new HFileBlockIndex.BlockIndexReader(comparator,
+        new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator,
             trailer.getNumDataIndexLevels());
     HFileBlockIndex.BlockIndexReader metaBlockIndexReader =
-        new HFileBlockIndex.BlockIndexReader(
-            null, 1);
+        new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
 
     HFileBlock.BlockIterator blockIter = blockReader.blockRange(
         trailer.getLoadOnOpenDataOffset(),
@@ -194,7 +194,7 @@ public class TestHFileWriterV3 {
         blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount());
     
     if (findMidKey) {
-      byte[] midkey = dataBlockIndexReader.midkey();
+      Cell midkey = dataBlockIndexReader.midkey();
       assertNotNull("Midkey should not be null", midkey);
     }
     

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
index ce40515..fce81fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
@@ -46,15 +46,14 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter;
+import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CompoundBloomFilter;
-import org.apache.hadoop.hbase.util.CompoundBloomFilterBase;
-import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hbase/blob/487e4aa7/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 54f79f4..499e57c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -170,7 +170,7 @@ public class TestStoreFile extends HBaseTestCase {
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
     // timestamp.
-    KeyValue kv = KeyValueUtil.createKeyValueFromKey(reader.midkey());
+    Cell kv = reader.midkey();
     byte [] midRow = kv.getRow();
     kv = KeyValueUtil.createKeyValueFromKey(reader.getLastKey());
     byte [] finalRow = kv.getRow();
@@ -314,8 +314,8 @@ public class TestStoreFile extends HBaseTestCase {
 
   private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f)
       throws IOException {
-    byte [] midkey = f.createReader().midkey();
-    KeyValue midKV = KeyValueUtil.createKeyValueFromKey(midkey);
+    Cell midkey = f.createReader().midkey();
+    KeyValue midKV = (KeyValue)midkey;
     byte [] midRow = midKV.getRow();
     // Create top split.
     HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(),
@@ -332,7 +332,7 @@ public class TestStoreFile extends HBaseTestCase {
       this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + midKV.toString());
-    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
+    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midKV.getKey());
     try {
       // Now make two HalfMapFiles and assert they can read the full backing
       // file, one from the top and the other from the bottom.
@@ -348,7 +348,7 @@ public class TestStoreFile extends HBaseTestCase {
         if ((topScanner.getReader().getComparator().compare(midKV, key.array(),
           key.arrayOffset(), key.limit())) > 0) {
           fail("key=" + Bytes.toStringBinary(key) + " < midkey=" +
-              Bytes.toStringBinary(midkey));
+              midkey);
         }
         if (first) {
           first = false;

