hbase-commits mailing list archives

From zhang...@apache.org
Subject [05/11] hbase git commit: HBASE-18825 Use HStoreFile instead of StoreFile in our own code base and remove unnecessary methods in StoreFile interface
Date Mon, 25 Sep 2017 01:58:47 GMT
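
The recurring change across every hunk below is mechanical: server-internal and test code that was written against the StoreFile interface is retyped to the concrete HStoreFile, and per the subject line the methods those call sites need are dropped from the interface. A minimal sketch of the resulting call shape, using only the constructor and reader calls that appear in these diffs; the class and method names are illustrative, not part of the patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;

// Sketch of the commit's core pattern (not part of the patch itself).
class HStoreFileUsageSketch {
  static StoreFileReader openReader(FileSystem fs, Path path, Configuration conf,
      CacheConfig cacheConf) throws IOException {
    // was: StoreFile sf = new HStoreFile(...), with implementation-only
    // methods reachable only through the interface or via casts
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    sf.initReader(); // implementation-side method, invoked on the concrete type
    return sf.getReader();
  }
}
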
http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
deleted file mode 100644
index 6fa951e..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.OptionalLong;
-import java.util.TreeMap;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/** A mock used so our tests don't deal with actual StoreFiles */
-public class MockStoreFile extends HStoreFile {
-  long length = 0;
-  boolean isRef = false;
-  long ageInDisk;
-  long sequenceid;
-  private Map<byte[], byte[]> metadata = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-  byte[] splitPoint = null;
-  TimeRangeTracker timeRangeTracker;
-  long entryCount;
-  boolean isMajor;
-  HDFSBlocksDistribution hdfsBlocksDistribution;
-  long modificationTime;
-  boolean compactedAway;
-
-  MockStoreFile(HBaseTestingUtility testUtil, Path testPath,
-      long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
-    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
-        new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
-    this.length = length;
-    this.isRef = isRef;
-    this.ageInDisk = ageInDisk;
-    this.sequenceid = sequenceid;
-    this.isMajor = false;
-    hdfsBlocksDistribution = new HDFSBlocksDistribution();
-    hdfsBlocksDistribution.addHostsAndBlockWeight(
-      new String[] { RSRpcServices.getHostname(testUtil.getConfiguration(), false) }, 1);
-    modificationTime = EnvironmentEdgeManager.currentTime();
-  }
-
-  void setLength(long newLen) {
-    this.length = newLen;
-  }
-
-  @Override
-  public long getMaxSequenceId() {
-    return sequenceid;
-  }
-
-  @Override
-  public boolean isMajorCompactionResult() {
-    return isMajor;
-  }
-
-  public void setIsMajor(boolean isMajor) {
-    this.isMajor = isMajor;
-  }
-
-  @Override
-  public boolean isReference() {
-    return this.isRef;
-  }
-
-  @Override
-  public boolean isBulkLoadResult() {
-    return false;
-  }
-
-  @Override
-  public byte[] getMetadataValue(byte[] key) {
-    return this.metadata.get(key);
-  }
-
-  public void setMetadataValue(byte[] key, byte[] value) {
-    this.metadata.put(key, value);
-  }
-
-  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
-    this.timeRangeTracker = timeRangeTracker;
-  }
-
-  void setEntries(long entryCount) {
-    this.entryCount = entryCount;
-  }
-
-  public OptionalLong getMinimumTimestamp() {
-    return timeRangeTracker == null ? OptionalLong.empty()
-        : OptionalLong.of(timeRangeTracker.getMin());
-  }
-
-  public OptionalLong getMaximumTimestamp() {
-    return timeRangeTracker == null ? OptionalLong.empty()
-        : OptionalLong.of(timeRangeTracker.getMax());
-  }
-
-  @Override
-  public void markCompactedAway() {
-    this.compactedAway = true;
-  }
-
-  @Override
-  public boolean isCompactedAway() {
-    return compactedAway;
-  }
-
-  @Override
-  public long getModificationTimeStamp() {
-    return modificationTime;
-  }
-
-  @Override
-  public HDFSBlocksDistribution getHDFSBlockDistribution() {
-    return hdfsBlocksDistribution;
-  }
-
-  @Override
-  public void initReader() throws IOException {
-  }
-
-  @Override
-  public StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder,
-      boolean canOptimizeForNonNullColumn) {
-    return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder,
-      canOptimizeForNonNullColumn);
-  }
-
-  @Override
-  public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-      boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
-      throws IOException {
-    return getReader().getStoreFileScanner(cacheBlocks, false, isCompaction, readPt, scannerOrder,
-      canOptimizeForNonNullColumn);
-  }
-
-  @Override
-  public StoreFileReader getReader() {
-    final long len = this.length;
-    final TimeRangeTracker timeRangeTracker = this.timeRangeTracker;
-    final long entries = this.entryCount;
-    return new StoreFileReader() {
-      @Override
-      public long length() {
-        return len;
-      }
-
-      @Override
-      public long getMaxTimestamp() {
-        return timeRange == null? Long.MAX_VALUE: timeRangeTracker.getMax();
-      }
-
-      @Override
-      public long getEntries() {
-        return entries;
-      }
-
-      @Override
-      public void close(boolean evictOnClose) throws IOException {
-        // no-op
-      }
-
-      @Override
-      public Cell getLastKey() {
-        if (splitPoint != null) {
-          return CellUtil.createCell(Arrays.copyOf(splitPoint, splitPoint.length + 1));
-        } else {
-          return null;
-        }
-      }
-
-      @Override
-      public Cell midkey() throws IOException {
-        if (splitPoint != null) {
-          return CellUtil.createCell(splitPoint);
-        } else {
-          return null;
-        }
-      }
-
-      @Override
-      public Cell getFirstKey() {
-        if (splitPoint != null) {
-          return CellUtil.createCell(Arrays.copyOf(splitPoint, splitPoint.length - 1));
-        } else {
-          return null;
-        }
-      }
-    };
-  }
-
-  @Override
-  public OptionalLong getBulkLoadTimestamp() {
-    // we always return false for isBulkLoadResult so we do not have a bulk load timestamp
-    return OptionalLong.empty();
-  }
-}
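
This mock is not simply gone: later hunks in this same patch construct a MockHStoreFile with the identical six-argument list (see TestCompactionArchiveIOException and TestCompactionPolicy below). A condensed, hypothetical sketch of the renamed test double, assuming it keeps the constructor contract deleted above; the real class presumably carries over the remaining overrides as well:

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

// Hypothetical condensed sketch of the replacement mock; not the full class.
public class MockHStoreFile extends HStoreFile {
  long length;
  boolean isRef;
  long ageInDisk;
  long sequenceid;

  MockHStoreFile(HBaseTestingUtility testUtil, Path testPath, long length,
      long ageInDisk, boolean isRef, long sequenceid) throws IOException {
    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
        new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
    this.length = length;
    this.isRef = isRef;
    this.ageInDisk = ageInDisk;
    this.sequenceid = sequenceid;
  }
}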

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
index 36c2e19..eecc069 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 
 /**
  * RegionObserver that just reimplements the default behavior,
@@ -51,7 +50,7 @@ public class NoOpScanPolicyObserver implements RegionObserver {
     ScanInfo oldSI = store.getScanInfo();
     ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
         oldSI.getTtl(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
-    return new StoreScanner(store, scanInfo, OptionalInt.empty(), scanners,
+    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners,
         ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
   }
 
@@ -67,7 +66,7 @@ public class NoOpScanPolicyObserver implements RegionObserver {
     ScanInfo oldSI = store.getScanInfo();
     ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
         oldSI.getTtl(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
-    return new StoreScanner(store, scanInfo, OptionalInt.empty(), scanners, scanType,
+    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners, scanType,
         store.getSmallestReadPoint(), earliestPutTs);
   }
 
@@ -76,10 +75,10 @@ public class NoOpScanPolicyObserver implements RegionObserver {
       Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPoint)
       throws IOException {
     Region r = c.getEnvironment().getRegion();
-    return scan.isReversed() ? new ReversedStoreScanner(store,
-        store.getScanInfo(), scan, targetCols, r.getReadPoint(scan
-            .getIsolationLevel())) : new StoreScanner(store,
-        store.getScanInfo(), scan, targetCols, r.getReadPoint(scan
-            .getIsolationLevel()));
+    return scan.isReversed()
+        ? new ReversedStoreScanner((HStore) store, store.getScanInfo(), scan, targetCols,
+            r.getReadPoint(scan.getIsolationLevel()))
+        : new StoreScanner((HStore) store, store.getScanInfo(), scan, targetCols,
+            r.getReadPoint(scan.getIsolationLevel()));
   }
 }
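
The pattern in this file: the StoreScanner and ReversedStoreScanner constructors now take the concrete HStore, so a RegionObserver that is handed the Store interface narrows it with a cast. A minimal sketch of just that call shape, with the method signature condensed and assuming the observer only ever receives an HStore at runtime:

import java.io.IOException;
import java.util.List;
import java.util.OptionalInt;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.ScanInfo;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;

// Sketch of the cast pattern applied in this hunk.
class CompactionScannerSketch {
  static InternalScanner open(Store store, ScanInfo scanInfo,
      List<? extends KeyValueScanner> scanners) throws IOException {
    // was: new StoreScanner(store, ...); the constructor now wants HStore
    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners,
        ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
        HConstants.OLDEST_TIMESTAMP);
  }
}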

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 8fad157..2d08e50 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -218,7 +218,7 @@ public class TestCacheOnWriteInSchema {
   private void readStoreFile(Path path) throws IOException {
     CacheConfig cacheConf = store.getCacheConfig();
     BlockCache cache = cacheConf.getBlockCache();
-    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
+    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
     sf.initReader();
     HFile.Reader reader = sf.getReader().getHFileReader();
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 3649823..356054e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY_BYTES;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
@@ -226,7 +227,7 @@ public class TestCompaction {
 
   private int count() throws IOException {
     int count = 0;
-    for (StoreFile f: this.r.stores.
+    for (HStoreFile f: this.r.stores.
         get(COLUMN_FAMILY_TEXT).getStorefiles()) {
       HFileScanner scanner = f.getReader().getScanner(false, false);
       if (!scanner.seekTo()) {
@@ -255,9 +256,9 @@ public class TestCompaction {
     for (int i = 0; i < nfiles; i++) {
       createStoreFile(r);
     }
-    HStore store = (HStore) r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
 
-    Collection<StoreFile> storeFiles = store.getStorefiles();
+    Collection<HStoreFile> storeFiles = store.getStorefiles();
     DefaultCompactor tool = (DefaultCompactor)store.storeEngine.getCompactor();
     tool.compactForTesting(storeFiles, false);
 
@@ -276,8 +277,8 @@ public class TestCompaction {
     } catch (Exception e) {
       // The complete compaction should fail and the corrupt file should remain
       // in the 'tmp' directory;
-      assert (fs.exists(origPath));
-      assert (!fs.exists(dstPath));
+      assertTrue(fs.exists(origPath));
+      assertFalse(fs.exists(dstPath));
       System.out.println("testCompactionWithCorruptResult Passed");
       return;
     }
@@ -389,8 +390,8 @@ public class TestCompaction {
   }
 
   private class StoreMockMaker extends StatefulStoreMockMaker {
-    public ArrayList<StoreFile> compacting = new ArrayList<>();
-    public ArrayList<StoreFile> notCompacting = new ArrayList<>();
+    public ArrayList<HStoreFile> compacting = new ArrayList<>();
+    public ArrayList<HStoreFile> notCompacting = new ArrayList<>();
     private ArrayList<Integer> results;
 
     public StoreMockMaker(ArrayList<Integer> results) {
@@ -398,19 +399,21 @@ public class TestCompaction {
     }
 
     public class TestCompactionContext extends CompactionContext {
-      private List<StoreFile> selectedFiles;
-      public TestCompactionContext(List<StoreFile> selectedFiles) {
+
+      private List<HStoreFile> selectedFiles;
+
+      public TestCompactionContext(List<HStoreFile> selectedFiles) {
         super();
         this.selectedFiles = selectedFiles;
       }
 
       @Override
-      public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
+      public List<HStoreFile> preSelect(List<HStoreFile> filesCompacting) {
         return new ArrayList<>();
       }
 
       @Override
-      public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
+      public boolean select(List<HStoreFile> filesCompacting, boolean isUserCompaction,
           boolean mayUseOffPeak, boolean forceMajor) throws IOException {
         this.request = new CompactionRequest(selectedFiles);
         this.request.setPriority(getPriority());
@@ -445,7 +448,7 @@ public class TestCompaction {
       notCompacting.addAll(ctx.selectedFiles);
     }
 
-    public synchronized void finishCompaction(List<StoreFile> sfs) {
+    public synchronized void finishCompaction(List<HStoreFile> sfs) {
       if (sfs.isEmpty()) return;
       synchronized (results) {
         results.add(sfs.size());
@@ -466,7 +469,9 @@ public class TestCompaction {
       public volatile boolean isInCompact = false;
 
       public void unblock() {
-        synchronized (this) { this.notifyAll(); }
+        synchronized (this) {
+          this.notifyAll();
+        }
       }
 
       @Override
@@ -484,12 +489,12 @@ public class TestCompaction {
       }
 
       @Override
-      public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
+      public List<HStoreFile> preSelect(List<HStoreFile> filesCompacting) {
         return new ArrayList<>();
       }
 
       @Override
-      public boolean select(List<StoreFile> f, boolean i, boolean m, boolean e)
+      public boolean select(List<HStoreFile> f, boolean i, boolean m, boolean e)
           throws IOException {
         this.request = new CompactionRequest(new ArrayList<>());
         return true;
@@ -673,14 +678,14 @@ public class TestCompaction {
   }
 
   public static class DummyCompactor extends DefaultCompactor {
-    public DummyCompactor(Configuration conf, Store store) {
+    public DummyCompactor(Configuration conf, HStore store) {
       super(conf, store);
       this.keepSeqIdPeriod = 0;
     }
   }
 
-  private static StoreFile createFile() throws Exception {
-    StoreFile sf = mock(StoreFile.class);
+  private static HStoreFile createFile() throws Exception {
+    HStoreFile sf = mock(HStoreFile.class);
     when(sf.getPath()).thenReturn(new Path("file"));
     StoreFileReader r = mock(StoreFileReader.class);
     when(r.length()).thenReturn(10L);
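
The createFile() helper above now mocks the concrete class. A minimal sketch of the adjusted Mockito wiring; the getReader() stubbing line is an assumption added for completeness, since the hunk is cut off after the length stub:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;

// Sketch of the retyped mock factory from the hunk above.
class MockedHStoreFileSketch {
  static HStoreFile createFile() {
    HStoreFile sf = mock(HStoreFile.class); // was: mock(StoreFile.class)
    when(sf.getPath()).thenReturn(new Path("file"));
    StoreFileReader r = mock(StoreFileReader.class);
    when(r.length()).thenReturn(10L);
    when(sf.getReader()).thenReturn(r); // assumed wiring, not shown in the hunk
    return sf;
  }
}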

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
index d68f07e..2a2602c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver;
 
 import static org.junit.Assert.assertEquals;
@@ -89,7 +88,7 @@ public class TestCompactionArchiveConcurrentClose {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(fam));
     HRegionInfo info = new HRegionInfo(tableName, null, null, false);
-    Region region = initHRegion(htd, info);
+    HRegion region = initHRegion(htd, info);
     RegionServerServices rss = mock(RegionServerServices.class);
     List<Region> regions = new ArrayList<>();
     regions.add(region);
@@ -112,12 +111,12 @@ public class TestCompactionArchiveConcurrentClose {
       region.flush(true);
     }
 
-    Store store = region.getStore(fam);
+    HStore store = region.getStore(fam);
     assertEquals(fileCount, store.getStorefilesCount());
 
-    Collection<StoreFile> storefiles = store.getStorefiles();
+    Collection<HStoreFile> storefiles = store.getStorefiles();
     // None of the files should be in compacted state.
-    for (StoreFile file : storefiles) {
+    for (HStoreFile file : storefiles) {
       assertFalse(file.isCompactedAway());
     }
     // Do compaction
@@ -157,7 +156,7 @@ public class TestCompactionArchiveConcurrentClose {
     }
   }
 
-  private Region initHRegion(HTableDescriptor htd, HRegionInfo info)
+  private HRegion initHRegion(HTableDescriptor htd, HRegionInfo info)
       throws IOException {
     Configuration conf = testUtil.getConfiguration();
     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
@@ -185,7 +184,7 @@ public class TestCompactionArchiveConcurrentClose {
     }
 
     @Override
-    public void removeStoreFiles(String familyName, Collection<StoreFile> storeFiles)
+    public void removeStoreFiles(String familyName, Collection<HStoreFile> storeFiles)
         throws IOException {
       super.removeStoreFiles(familyName, storeFiles);
       archived.set(true);
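
The broader pattern in this file: test helpers are retyped from the Region and Store interfaces to HRegion and HStore so that the storefile-oriented calls return concrete types without casts. A minimal sketch under that assumption, with hypothetical helper names:

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;

// Sketch of the type narrowing applied in the hunks above.
abstract class RegionTypingSketch {
  // was: private Region initHRegion(...)
  abstract HRegion initHRegion(HTableDescriptor htd, HRegionInfo info) throws IOException;

  void inspectStore(HTableDescriptor htd, HRegionInfo info, byte[] fam) throws IOException {
    HRegion region = initHRegion(htd, info);
    HStore store = region.getStore(fam);                   // HStore, not Store
    Collection<HStoreFile> files = store.getStorefiles();  // HStoreFile, not StoreFile
  }
}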

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
index 3f1613c..1d976e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
@@ -120,24 +120,24 @@ public class TestCompactionArchiveIOException {
       region.flush(true);
     }
 
-    HStore store = (HStore) region.getStore(fam);
+    HStore store = region.getStore(fam);
     assertEquals(fileCount, store.getStorefilesCount());
 
-    Collection<StoreFile> storefiles = store.getStorefiles();
+    Collection<HStoreFile> storefiles = store.getStorefiles();
     // None of the files should be in compacted state.
-    for (StoreFile file : storefiles) {
+    for (HStoreFile file : storefiles) {
       assertFalse(file.isCompactedAway());
     }
 
     StoreFileManager fileManager = store.getStoreEngine().getStoreFileManager();
-    Collection<StoreFile> initialCompactedFiles = fileManager.getCompactedfiles();
+    Collection<HStoreFile> initialCompactedFiles = fileManager.getCompactedfiles();
     assertTrue(initialCompactedFiles == null || initialCompactedFiles.isEmpty());
 
     // Do compaction
     region.compact(true);
 
     // all prior store files should now be compacted
-    Collection<StoreFile> compactedFilesPreClean = fileManager.getCompactedfiles();
+    Collection<HStoreFile> compactedFilesPreClean = fileManager.getCompactedfiles();
     assertNotNull(compactedFilesPreClean);
     assertTrue(compactedFilesPreClean.size() > 0);
 
@@ -148,17 +148,17 @@ public class TestCompactionArchiveIOException {
     out.writeInt(1);
     out.close();
 
-    StoreFile errStoreFile = new MockStoreFile(testUtil, errFile, 1, 0, false, 1);
+    HStoreFile errStoreFile = new MockHStoreFile(testUtil, errFile, 1, 0, false, 1);
     fileManager.addCompactionResults(
-        ImmutableList.of(errStoreFile), ImmutableList.<StoreFile>of());
+        ImmutableList.of(errStoreFile), ImmutableList.of());
 
     // cleanup compacted files
     cleaner.chore();
 
     // make sure the compacted files are cleared
-    Collection<StoreFile> compactedFilesPostClean = fileManager.getCompactedfiles();
+    Collection<HStoreFile> compactedFilesPostClean = fileManager.getCompactedfiles();
     assertEquals(1, compactedFilesPostClean.size());
-    for (StoreFile origFile : compactedFilesPreClean) {
+    for (HStoreFile origFile : compactedFilesPreClean) {
       assertFalse(compactedFilesPostClean.contains(origFile));
     }
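
One side effect visible in this hunk: once the file manager traffics in HStoreFile, the explicit ImmutableList.<StoreFile>of() type witness on the empty list becomes unnecessary and plain ImmutableList.of() infers correctly. A minimal sketch of that call, assuming the addCompactionResults signature implied here:

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileManager;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;

// Sketch: inference now supplies the element type for the empty list.
class AddCompactionResultsSketch {
  static void recordResult(StoreFileManager fileManager, HStoreFile file)
      throws IOException {
    // was: fileManager.addCompactionResults(ImmutableList.of(file),
    //          ImmutableList.<StoreFile>of());
    fileManager.addCompactionResults(ImmutableList.of(file), ImmutableList.of());
  }
}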
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
index 0e4c4f9..6ae10ec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -46,6 +44,8 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
 public class TestCompactionPolicy {
   private final static Log LOG = LogFactory.getLog(TestCompactionPolicy.class);
   protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -141,7 +141,7 @@ public class TestCompactionPolicy {
     return result;
   }
 
-  List<StoreFile> sfCreate(long... sizes) throws IOException {
+  List<HStoreFile> sfCreate(long... sizes) throws IOException {
     ArrayList<Long> ageInDisk = new ArrayList<>();
     for (int i = 0; i < sizes.length; i++) {
       ageInDisk.add(0L);
@@ -149,11 +149,11 @@ public class TestCompactionPolicy {
     return sfCreate(toArrayList(sizes), ageInDisk);
   }
 
-  List<StoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
+  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
     return sfCreate(false, sizes, ageInDisk);
   }
 
-  List<StoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
+  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
     ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
     for (int i = 0; i < sizes.length; i++) {
       ageInDisk.add(0L);
@@ -161,17 +161,17 @@ public class TestCompactionPolicy {
     return sfCreate(isReference, toArrayList(sizes), ageInDisk);
   }
 
-  List<StoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
+  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
       throws IOException {
-    List<StoreFile> ret = Lists.newArrayList();
+    List<HStoreFile> ret = Lists.newArrayList();
     for (int i = 0; i < sizes.size(); i++) {
-      ret.add(new MockStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
+      ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
           i));
     }
     return ret;
   }
 
-  long[] getSizes(List<StoreFile> sfList) {
+  long[] getSizes(List<HStoreFile> sfList) {
     long[] aNums = new long[sfList.size()];
     for (int i = 0; i < sfList.size(); ++i) {
       aNums[i] = sfList.get(i).getReader().length();
@@ -179,23 +179,23 @@ public class TestCompactionPolicy {
     return aNums;
   }
 
-  void compactEquals(List<StoreFile> candidates, long... expected) throws IOException {
+  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
     compactEquals(candidates, false, false, expected);
   }
 
-  void compactEquals(List<StoreFile> candidates, boolean forcemajor, long... expected)
+  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
       throws IOException {
     compactEquals(candidates, forcemajor, false, expected);
   }
 
-  void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
+  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
       long... expected) throws IOException {
     store.forceMajor = forcemajor;
     // Test Default compactions
     CompactionRequest result =
         ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction(
           candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
-    List<StoreFile> actual = new ArrayList<>(result.getFiles());
+    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
     if (isOffPeak && !forcemajor) {
       Assert.assertTrue(result.isOffPeak());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
index 26172f5..9c33d28 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
@@ -200,7 +200,7 @@ public class TestCompoundBloomFilter {
 
   private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
       Path sfPath) throws IOException {
-    StoreFile sf = new HStoreFile(fs, sfPath, conf, cacheConf, bt, true);
+    HStoreFile sf = new HStoreFile(fs, sfPath, conf, cacheConf, bt, true);
     sf.initReader();
     StoreFileReader r = sf.getReader();
     final boolean pread = true; // does not really matter

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index 059b850..3689cf7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -98,7 +98,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy {
       // timestamp a bit to make sure that now - lowestModTime is greater than major compaction
       // period(1ms).
       // trigger an aged major compaction
-      List<StoreFile> candidates = sfCreate(50, 25, 12, 12);
+      List<HStoreFile> candidates = sfCreate(50, 25, 12, 12);
       edge.increment(2);
       compactEquals(candidates, 50, 25, 12, 12);
       // major sure exceeding maxCompactSize also downgrades aged minors
@@ -164,10 +164,10 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy {
         oldScanInfo.getTimeToPurgeDeletes(), oldScanInfo.getComparator(), oldScanInfo.isNewVersionBehavior());
     store.setScanInfo(newScanInfo);
     // Do not compact empty store file
-    List<StoreFile> candidates = sfCreate(0);
-    for (StoreFile file : candidates) {
-      if (file instanceof MockStoreFile) {
-        MockStoreFile mockFile = (MockStoreFile) file;
+    List<HStoreFile> candidates = sfCreate(0);
+    for (HStoreFile file : candidates) {
+      if (file instanceof MockHStoreFile) {
+        MockHStoreFile mockFile = (MockHStoreFile) file;
         mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
         mockFile.setEntries(0);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
index b9982aa..df5e97a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
@@ -22,10 +22,10 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
+import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
-import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -34,13 +34,13 @@ import org.mockito.Mockito;
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestDefaultStoreEngine {
   public static class DummyStoreFlusher extends DefaultStoreFlusher {
-    public DummyStoreFlusher(Configuration conf, Store store) {
+    public DummyStoreFlusher(Configuration conf, HStore store) {
       super(conf, store);
     }
   }
 
   public static class DummyCompactor extends DefaultCompactor {
-    public DummyCompactor(Configuration conf, Store store) {
+    public DummyCompactor(Configuration conf, HStore store) {
       super(conf, store);
     }
   }
@@ -59,7 +59,7 @@ public class TestDefaultStoreEngine {
         DummyCompactionPolicy.class.getName());
     conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
         DummyStoreFlusher.class.getName());
-    Store mockStore = Mockito.mock(Store.class);
+    HStore mockStore = Mockito.mock(HStore.class);
     StoreEngine<?, ?, ?, ?> se = StoreEngine.create(mockStore, conf, CellComparator.COMPARATOR);
     Assert.assertTrue(se instanceof DefaultStoreEngine);
     Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy);
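
The pattern in this test: StoreEngine.create is now fed the concrete HStore, so the Mockito target changes accordingly. A minimal sketch, assuming create's signature matches the call shown above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreEngine;
import org.mockito.Mockito;

// Sketch of the engine-creation call from the hunk above.
class StoreEngineCreateSketch {
  static StoreEngine<?, ?, ?, ?> createEngine() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HStore mockStore = Mockito.mock(HStore.class); // was: Mockito.mock(Store.class)
    return StoreEngine.create(mockStore, conf, CellComparator.COMPARATOR);
  }
}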

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
index 9acf244..f58d19a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
@@ -210,14 +210,14 @@ public class TestEncryptionKeyRotation {
     boolean compacted = false;
     for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
         .getOnlineRegions(tableName)) {
-      for (Store store : region.getStores()) {
+      for (HStore store : ((HRegion) region).getStores()) {
         compacted = false;
         while (!compacted) {
           if (store.getStorefiles() != null) {
             while (store.getStorefilesCount() != 1) {
               Thread.sleep(100);
             }
-            for (StoreFile storefile : store.getStorefiles()) {
+            for (HStoreFile storefile : store.getStorefiles()) {
               if (!storefile.isCompactedAway()) {
                 compacted = true;
                 break;
@@ -234,10 +234,10 @@ public class TestEncryptionKeyRotation {
 
   private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
     List<Path> paths = new ArrayList<>();
-    for (Region region:
-        TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) {
-      for (Store store: region.getStores()) {
-        for (StoreFile storefile: store.getStorefiles()) {
+    for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
+        .getOnlineRegions(tableName)) {
+      for (HStore store : ((HRegion) region).getStores()) {
+        for (HStoreFile storefile : store.getStorefiles()) {
           paths.add(storefile.getPath());
         }
       }
@@ -247,13 +247,13 @@ public class TestEncryptionKeyRotation {
 
   private static List<Path> findCompactedStorefilePaths(TableName tableName) throws Exception {
     List<Path> paths = new ArrayList<>();
-    for (Region region:
-        TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) {
-      for (Store store : region.getStores()) {
-        Collection<StoreFile> compactedfiles =
-            ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
+    for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
+        .getOnlineRegions(tableName)) {
+      for (HStore store : ((HRegion) region).getStores()) {
+        Collection<HStoreFile> compactedfiles =
+            store.getStoreEngine().getStoreFileManager().getCompactedfiles();
         if (compactedfiles != null) {
-          for (StoreFile storefile : compactedfiles) {
+          for (HStoreFile storefile : compactedfiles) {
             paths.add(storefile.getPath());
           }
         }
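
The iteration pattern applied throughout this file: Region.getStores() is interface-typed, so the tests cast each region to HRegion to walk HStore and HStoreFile directly. A minimal sketch of the path-collecting loop, assuming an iterable of online regions as in the hunks above:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.Region;

// Sketch of the cast-and-iterate pattern from the hunks above.
class StorefilePathsSketch {
  static List<Path> storefilePaths(Iterable<Region> regions) {
    List<Path> paths = new ArrayList<>();
    for (Region region : regions) {
      for (HStore store : ((HRegion) region).getStores()) {
        for (HStoreFile storefile : store.getStorefiles()) {
          paths.add(storefile.getPath());
        }
      }
    }
    return paths;
  }
}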

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
index 3837e94..75c752c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
@@ -55,8 +55,8 @@ public class TestEncryptionRandomKeying {
     List<Path> paths = new ArrayList<>();
     for (Region region:
         TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) {
-      for (Store store: region.getStores()) {
-        for (StoreFile storefile: store.getStorefiles()) {
+      for (HStore store : ((HRegion) region).getStores()) {
+        for (HStoreFile storefile : store.getStorefiles()) {
           paths.add(storefile.getPath());
         }
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index daddb5c..48081bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -94,7 +94,7 @@ public class TestFSErrorsExposed {
     TestHStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
-    StoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
+    HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
         BloomType.NONE, true);
     sf.initReader();
     StoreFileReader reader = sf.getReader();
@@ -144,12 +144,12 @@ public class TestFSErrorsExposed {
     TestHStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
-    StoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
+    HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
         BloomType.NONE, true);
 
     List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
         Collections.singletonList(sf), false, true, false, false,
-        // 0 is passed as readpoint because this test operates on StoreFile directly
+        // 0 is passed as readpoint because this test operates on HStoreFile directly
         0);
     KeyValueScanner scanner = scanners.get(0);
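
The scanner-opening call above keeps its six-argument shape; only the element type of the file list changes. A minimal sketch of the same call, assuming the getScannersForStoreFiles overload shown here:

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

// Sketch of the scanner-opening call from the hunk above.
class ScannerOpenSketch {
  static KeyValueScanner firstScanner(HStoreFile sf) throws IOException {
    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
        Collections.singletonList(sf), false, true, false, false,
        // readpoint 0, per the comment above: the test drives HStoreFile directly
        0);
    return scanners.get(0);
  }
}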
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 603203a..fc0659f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -518,7 +518,7 @@ public class TestHMobStore {
     this.store.add(new KeyValue(row, family, qf6, 1, value), null);
     flush(2);
 
-    Collection<StoreFile> storefiles = this.store.getStorefiles();
+    Collection<HStoreFile> storefiles = this.store.getStorefiles();
     checkMobHFileEncrytption(storefiles);
 
     // Scan the values
@@ -547,8 +547,8 @@ public class TestHMobStore {
     checkMobHFileEncrytption(this.store.getStorefiles());
   }
 
-  private void checkMobHFileEncrytption(Collection<StoreFile> storefiles) {
-    StoreFile storeFile = storefiles.iterator().next();
+  private void checkMobHFileEncrytption(Collection<HStoreFile> storefiles) {
+    HStoreFile storeFile = storefiles.iterator().next();
     HFile.Reader reader = storeFile.getReader().getHFileReader();
     byte[] encryptionKey = reader.getTrailer().getEncryptionKey();
     Assert.assertTrue(null != encryptionKey);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 334df17..0b0d651 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -18,6 +18,51 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -84,25 +129,14 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
 import org.apache.hadoop.hbase.regionserver.Region.RowLock;
-import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem;
+import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem;
 import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler;
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
@@ -116,6 +150,7 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.FaultyFSLog;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider;
@@ -136,50 +171,16 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
 
 /**
  * Basic stand-alone testing of HRegion.  No clusters!
@@ -897,7 +898,7 @@ public class TestHRegion {
       // this will create a region with 3 files
       assertEquals(3, region.getStore(family).getStorefilesCount());
       List<Path> storeFiles = new ArrayList<>(3);
-      for (StoreFile sf : region.getStore(family).getStorefiles()) {
+      for (HStoreFile sf : region.getStore(family).getStorefiles()) {
         storeFiles.add(sf.getPath());
       }
 
@@ -958,8 +959,8 @@ public class TestHRegion {
       }
 
       // now check whether we have only one store file, the compacted one
-      Collection<StoreFile> sfs = region.getStore(family).getStorefiles();
-      for (StoreFile sf : sfs) {
+      Collection<HStoreFile> sfs = region.getStore(family).getStorefiles();
+      for (HStoreFile sf : sfs) {
         LOG.info(sf.getPath());
       }
       if (!mismatchedRegionName) {
@@ -1011,7 +1012,7 @@ public class TestHRegion {
       // this will create a region with 3 files from flush
       assertEquals(3, region.getStore(family).getStorefilesCount());
       List<String> storeFiles = new ArrayList<>(3);
-      for (StoreFile sf : region.getStore(family).getStorefiles()) {
+      for (HStoreFile sf : region.getStore(family).getStorefiles()) {
         storeFiles.add(sf.getPath().getName());
       }
 
@@ -4052,8 +4053,8 @@ public class TestHRegion {
       }
       // before compaction
       HStore store = (HStore) region.getStore(fam1);
-      Collection<StoreFile> storeFiles = store.getStorefiles();
-      for (StoreFile storefile : storeFiles) {
+      Collection<HStoreFile> storeFiles = store.getStorefiles();
+      for (HStoreFile storefile : storeFiles) {
         StoreFileReader reader = storefile.getReader();
         reader.loadFileInfo();
         reader.loadBloomfilter();
@@ -4065,7 +4066,7 @@ public class TestHRegion {
 
       // after compaction
       storeFiles = store.getStorefiles();
-      for (StoreFile storefile : storeFiles) {
+      for (HStoreFile storefile : storeFiles) {
         StoreFileReader reader = storefile.getReader();
         reader.loadFileInfo();
         reader.loadBloomfilter();
@@ -4814,7 +4815,7 @@ public class TestHRegion {
       secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
 
       // move the file of the primary region to the archive, simulating a compaction
-      Collection<StoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
+      Collection<HStoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
       primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
       Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
           .getStoreFiles(families[0]);
@@ -5843,9 +5844,9 @@ public class TestHRegion {
     put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
     region.put(put);
     region.flush(true);
-    Store store = region.getStore(fam1);
-    Collection<StoreFile> storefiles = store.getStorefiles();
-    for (StoreFile sf : storefiles) {
+    HStore store = region.getStore(fam1);
+    Collection<HStoreFile> storefiles = store.getStorefiles();
+    for (HStoreFile sf : storefiles) {
       assertFalse("Tags should not be present "
           ,sf.getReader().getHFileReader().getFileContext().isIncludesTags());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 62ada8f..ab98ec9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -238,9 +238,9 @@ public class TestHRegionReplayEvents {
     verifyData(secondaryRegion, 0, 1000, cq, families);
 
     // close the region, and inspect that it has not flushed
-    Map<byte[], List<StoreFile>> files = secondaryRegion.close(false);
+    Map<byte[], List<HStoreFile>> files = secondaryRegion.close(false);
     // assert that there are no files (due to flush)
-    for (List<StoreFile> f : files.values()) {
+    for (List<HStoreFile> f : files.values()) {
       assertTrue(f.isEmpty());
     }
   }
@@ -1524,8 +1524,8 @@ public class TestHRegionReplayEvents {
       storeFileName.addAll(storeDesc.getStoreFileList());
     }
     // assert that the bulk loaded files are picked
-    for (Store s : secondaryRegion.getStores()) {
-      for (StoreFile sf : s.getStorefiles()) {
+    for (HStore s : secondaryRegion.getStores()) {
+      for (HStoreFile sf : s.getStorefiles()) {
         storeFileName.remove(sf.getPath().getName());
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 97f8ce3..af64be6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -17,11 +17,10 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -68,17 +67,13 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -87,6 +82,11 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
+
 /**
  * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of
  * the region server's bullkLoad functionality.
@@ -160,7 +160,7 @@ public class TestHRegionServerBulkLoad {
         KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
         writer.append(kv);
       }
-      writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
+      writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(now));
     } finally {
       writer.close();
     }
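
The BULKLOAD_TIME_KEY constant now lives on HStoreFile (see the static import added at the top of this file), so writers reference it directly. A minimal sketch of the tagging call, assuming an HFile.Writer as in the hunk above:

import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;

import java.io.IOException;

import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the relocated-constant usage from the hunk above.
class BulkLoadTimeKeySketch {
  static void tagBulkLoadTime(HFile.Writer writer, long now) throws IOException {
    // was: writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  }
}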

