phoenix-commits mailing list archives

From mujt...@apache.org
Subject [06/15] Rename package from org.apache.hadoop.hbase.index.* to org.apache.phoenix.index.* to fix classloader issue causing mutable index performance regression - https://issues.apache.org/jira/browse/PHOENIX-38
Date Sat, 15 Feb 2014 00:07:39 GMT
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
new file mode 100644
index 0000000..4d5f667
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.write.recovery;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Multimap;
+
+import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+
+
+public class PerRegionIndexWriteCache {
+
+  private Map<HRegion, Multimap<HTableInterfaceReference, Mutation>> cache =
+      new HashMap<HRegion, Multimap<HTableInterfaceReference, Mutation>>();
+
+
+  /**
+   * Get the edits for the given region, removing them from the cache. To add them back, call
+   * {@link #addEdits(HRegion, HTableInterfaceReference, Collection)}.
+   * @param region region whose pending edits should be retrieved
+   * @return the edits for the given region, or <tt>null</tt> if there are no pending edits
+   *         for the region
+   */
+  public Multimap<HTableInterfaceReference, Mutation> getEdits(HRegion region) {
+    return cache.remove(region);
+  }
+
+  /**
+   * @param region region to which the pending edits belong
+   * @param table index table to which the edits should eventually be written
+   * @param collection mutations to store for later replay
+   */
+  public void addEdits(HRegion region, HTableInterfaceReference table,
+      Collection<Mutation> collection) {
+    Multimap<HTableInterfaceReference, Mutation> edits = cache.get(region);
+    if (edits == null) {
+      edits = ArrayListMultimap.<HTableInterfaceReference, Mutation> create();
+      cache.put(region, edits);
+    }
+    edits.putAll(table, collection);
+  }
+}
\ No newline at end of file
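
A note on the contract above: the cache keys pending index updates by region, and getEdits both returns and removes them, so a caller that fails to replay the edits must put them back itself. A minimal sketch of that cycle, assuming the region, table reference, and mutations come from the surrounding coprocessor code (the sketch class and method names are hypothetical):

    import java.util.Collection;

    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    import com.google.common.collect.Multimap;

    import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
    import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;

    public class RecoveryCacheSketch {
      // region, tableRef and mutations are assumed to be supplied by the
      // surrounding coprocessor code; this only exercises the cache contract.
      void stashAndReplay(HRegion region, HTableInterfaceReference tableRef,
          Collection<Mutation> mutations) {
        PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
        cache.addEdits(region, tableRef, mutations);

        // getEdits returns AND removes the pending edits, so a failed replay
        // must call addEdits(...) again to restore them
        Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(region);
        if (edits != null) {
          for (Mutation m : edits.get(tableRef)) {
            // replay m against the index table here
          }
        }
      }
    }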

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
new file mode 100644
index 0000000..f36affb
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.write.recovery;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
+import com.google.common.collect.Multimap;
+import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
+import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.apache.phoenix.hbase.index.write.IndexFailurePolicy;
+import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
+
+/**
+ * Tracks any failed writes in the {@link PerRegionIndexWriteCache}, given a
+ * {@link MultiIndexWriteFailureException} (which is thrown from the
+ * {@link TrackingParallelWriterIndexCommitter}). Any other failure causes a server
+ * abort via the usual {@link KillServerOnFailurePolicy}.
+ */
+public class StoreFailuresInCachePolicy implements IndexFailurePolicy {
+
+  private KillServerOnFailurePolicy delegate;
+  private PerRegionIndexWriteCache cache;
+  private HRegion region;
+
+  /**
+   * @param failedIndexEdits cache to update when we find a failure
+   */
+  public StoreFailuresInCachePolicy(PerRegionIndexWriteCache failedIndexEdits) {
+    this.cache = failedIndexEdits;
+  }
+
+  @Override
+  public void setup(Stoppable parent, RegionCoprocessorEnvironment env) {
+    this.region = env.getRegion();
+    this.delegate = new KillServerOnFailurePolicy();
+    this.delegate.setup(parent, env);
+
+  }
+
+  @Override
+  public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws IOException {
+    // if it's not an exception we can handle, let the delegate take care of it
+    if (!(cause instanceof MultiIndexWriteFailureException)) {
+      delegate.handleFailure(attempted, cause);
+    }
+    List<HTableInterfaceReference> failedTables =
+        ((MultiIndexWriteFailureException) cause).getFailedTables();
+    for (HTableInterfaceReference table : failedTables) {
+      cache.addEdits(this.region, table, attempted.get(table));
+    }
+  }
+
+
+  @Override
+  public void stop(String why) {
+    this.delegate.stop(why);
+  }
+
+  @Override
+  public boolean isStopped() {
+    return this.delegate.isStopped();
+  }
+}
\ No newline at end of file
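
The division of labor is worth spelling out: only a MultiIndexWriteFailureException carries the list of index tables whose writes failed, so only in that case can the edits be parked in the cache; any other exception escalates to the kill-server delegate. A hedged sketch of the wiring, assuming the Stoppable, environment, and attempted-edits multimap are provided by the indexing coprocessor (the sketch class and method names are hypothetical):

    import java.io.IOException;

    import org.apache.hadoop.hbase.Stoppable;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    import com.google.common.collect.Multimap;

    import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
    import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
    import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
    import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;

    public class FailurePolicySketch {
      // stop, env, attempted and cause are assumed to come from the indexing
      // coprocessor; this only shows how the policy routes a tracked failure.
      void onWriteFailure(Stoppable stop, RegionCoprocessorEnvironment env,
          Multimap<HTableInterfaceReference, Mutation> attempted,
          MultiIndexWriteFailureException cause) throws IOException {
        PerRegionIndexWriteCache failedEdits = new PerRegionIndexWriteCache();
        StoreFailuresInCachePolicy policy = new StoreFailuresInCachePolicy(failedEdits);
        policy.setup(stop, env);

        // the edits for each failed table are stored for later replay; any
        // non-MultiIndexWriteFailureException would instead abort the server
        policy.handleFailure(attempted, cause);

        // recovery code can later claim the stored edits via
        // failedEdits.getEdits(env.getRegion())
      }
    }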

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
new file mode 100644
index 0000000..2517b8f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.write.recovery;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+import com.google.common.collect.Multimap;
+import org.apache.phoenix.hbase.index.CapturingAbortable;
+import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
+import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
+import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure;
+import org.apache.phoenix.hbase.index.parallel.Task;
+import org.apache.phoenix.hbase.index.parallel.TaskBatch;
+import org.apache.phoenix.hbase.index.parallel.TaskRunner;
+import org.apache.phoenix.hbase.index.parallel.ThreadPoolBuilder;
+import org.apache.phoenix.hbase.index.parallel.ThreadPoolManager;
+import org.apache.phoenix.hbase.index.parallel.WaitForCompletionTaskRunner;
+import org.apache.phoenix.hbase.index.table.CachingHTableFactory;
+import org.apache.phoenix.hbase.index.table.HTableFactory;
+import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.apache.phoenix.hbase.index.write.IndexCommitter;
+import org.apache.phoenix.hbase.index.write.IndexWriter;
+import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
+import org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter;
+
+/**
+ * Like the {@link ParallelWriterIndexCommitter}, but blocks until all writes have been attempted,
+ * to allow the caller to retrieve the failed and succeeded index updates. Therefore, this class
+ * will be a lot slower, in the face of failures, than the
+ * {@link ParallelWriterIndexCommitter} (though just as fast for writes), so it should be used only
+ * when you need to at least attempt all writes and know their result; for instance, this is fine
+ * for doing WAL recovery - it's not a performance-intensive situation and we want to limit the
+ * edits we need to retry.
+ * <p>
+ * On failure to {@link #write(Multimap)}, we return a {@link MultiIndexWriteFailureException} that
+ * contains the list of {@link HTableInterfaceReference} that didn't complete successfully.
+ * <p>
+ * Failures to write to the index can happen several different ways:
+ * <ol>
+ * <li><tt>this</tt> is {@link #stop(String) stopped} or aborted (via the passed {@link Abortable}).
+ * This causes any pending tasks to fail whatever they are doing as fast as possible. Any writes
+ * that have not begun are not even attempted and are marked as failures.</li>
+ * <li>A batch write fails. This is the generic HBase write failure - it may occur because the index
+ * table is not available, .META. or -ROOT- is unavailable, or any other (of many) possible HBase
+ * exceptions.</li>
+ * </ol>
+ * Regardless of how the write fails, we still wait for all writes to complete before passing the
+ * failure back to the client.
+ */
+public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
+  private static final Log LOG = LogFactory.getLog(TrackingParallelWriterIndexCommitter.class);
+
+  public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.trackingwriter.threads.max";
+  private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
+  private static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY =
+      "index.trackingwriter.threads.keepalivetime";
+  
+  private TaskRunner pool;
+  private HTableFactory factory;
+  private CapturingAbortable abortable;
+  private Stoppable stopped;
+
+  @Override
+  public void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name) {
+    Configuration conf = env.getConfiguration();
+    setup(IndexWriterUtils.getDefaultDelegateHTableFactory(env),
+      ThreadPoolManager.getExecutor(
+        new ThreadPoolBuilder(name, conf).
+          setMaxThread(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY,
+            DEFAULT_CONCURRENT_INDEX_WRITER_THREADS).
+          setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env),
+      env.getRegionServerServices(), parent, CachingHTableFactory.getCacheSize(conf));
+  }
+
+  /**
+   * Setup <tt>this</tt>.
+   * <p>
+   * Exposed for TESTING
+   */
+  void setup(HTableFactory factory, ExecutorService pool, Abortable abortable, Stoppable stop,
+      int cacheSize) {
+    this.pool = new WaitForCompletionTaskRunner(pool);
+    this.factory = new CachingHTableFactory(factory, cacheSize);
+    this.abortable = new CapturingAbortable(abortable);
+    this.stopped = stop;
+  }
+
+  @Override
+  public void write(Multimap<HTableInterfaceReference, Mutation> toWrite)
+      throws MultiIndexWriteFailureException {
+    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
+    TaskBatch<Boolean> tasks = new TaskBatch<Boolean>(entries.size());
+    List<HTableInterfaceReference> tables = new ArrayList<HTableInterfaceReference>(entries.size());
+    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
+      // get the mutations for each table. We leak the implementation here a little bit to save
+      // doing a complete copy of all the index updates for each table.
+      final List<Mutation> mutations = (List<Mutation>) entry.getValue();
+      // track each reference so we can get at it easily later, when determining failures
+      final HTableInterfaceReference tableReference = entry.getKey();
+      tables.add(tableReference);
+
+      /*
+       * Write a batch of index updates to an index table. This operation stops (is cancelable) via
+       * two mechanisms: (1) setting aborted or stopped on the IndexWriter, or (2) interrupting the
+       * running thread. The former only works if we are not in the midst of writing the current
+       * batch to the table, though we do check these status variables before starting and before
+       * writing the batch. The latter, interrupting the thread, works in the previous
+       * situations as well as at some points while writing the batch, depending on the underlying
+       * writer implementation (HTableInterface#batch is blocking, but doesn't document when it
+       * supports an interrupt).
+       */
+      tasks.add(new Task<Boolean>() {
+
+        /**
+         * Do the actual write to the index table. We don't need to worry about closing the table
+         * because that is handled by the {@link CachingHTableFactory}.
+         */
+        @Override
+        public Boolean call() throws Exception {
+          try {
+            // this may have been queued, but there was an abort/stop so we try to exit early
+            throwFailureIfDone();
+
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
+            }
+            HTableInterface table = factory.getTable(tableReference.get());
+            throwFailureIfDone();
+            table.batch(mutations);
+          } catch (InterruptedException e) {
+            // reset the interrupt status on the thread
+            Thread.currentThread().interrupt();
+            throw e;
+          } catch (Exception e) {
+            throw e;
+          }
+          return Boolean.TRUE;
+        }
+
+        private void throwFailureIfDone() throws SingleIndexWriteFailureException {
+          if (stopped.isStopped() || abortable.isAborted()
+              || Thread.currentThread().isInterrupted()) {
+            throw new SingleIndexWriteFailureException(
+                "Pool closed, not attempting to write to the index!", null);
+          }
+
+        }
+      });
+    }
+
+    List<Boolean> results = null;
+    try {
+      LOG.debug("Waiting on index update tasks to complete...");
+      results = this.pool.submitUninterruptible(tasks);
+    } catch (ExecutionException e) {
+      throw new RuntimeException(
+          "Should not fail on the results while using a WaitForCompletionTaskRunner", e);
+    } catch (EarlyExitFailure e) {
+      throw new RuntimeException("Stopped while waiting for batch, quiting!", e);
+    }
+    
+    // track the failures. We only ever access this on return from our calls, so no extra
+    // synchronization is needed. We could update all the failures as we find them, but that adds a
+    // lot of locking overhead, and just doing the copy later is about as efficient.
+    List<HTableInterfaceReference> failures = new ArrayList<HTableInterfaceReference>();
+    int index = 0;
+    for (Boolean result : results) {
+      // there was a failure
+      if (result == null) {
+        // we know which table failed by the index of the result
+        failures.add(tables.get(index));
+      }
+      index++;
+    }
+
+    // if any of the tasks failed, then we need to propagate the failure
+    if (failures.size() > 0) {
+      // make the list unmodifiable to avoid any more synchronization concerns
+      throw new MultiIndexWriteFailureException(Collections.unmodifiableList(failures));
+    }
+    return;
+  }
+
+  @Override
+  public void stop(String why) {
+    LOG.info("Shutting down " + this.getClass().getSimpleName());
+    this.pool.stop(why);
+    this.factory.shutdown();
+  }
+
+  @Override
+  public boolean isStopped() {
+    return this.stopped.isStopped();
+  }
+}
\ No newline at end of file
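
The essential pattern here - submit every write, wait for all of them regardless of individual outcomes, and only then report which targets failed - can be illustrated without the TaskRunner machinery. A self-contained sketch using a plain ExecutorService, with String stand-ins for HTableInterfaceReference and a hypothetical simulated failure:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class WaitForAllWritesSketch {
      public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        final List<String> tables = Arrays.asList("idx_a", "idx_b", "idx_c");
        List<Future<Boolean>> results = new ArrayList<Future<Boolean>>();
        for (final String table : tables) {
          results.add(pool.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
              // stand-in for table.batch(mutations); idx_b simulates a failure
              if ("idx_b".equals(table)) {
                throw new Exception("simulated batch write failure");
              }
              return Boolean.TRUE;
            }
          }));
        }
        // wait for every write attempt to finish, successful or not, before
        // reporting anything - this is what makes the committer "tracking"
        List<String> failures = new ArrayList<String>();
        for (int i = 0; i < results.size(); i++) {
          try {
            results.get(i).get();
          } catch (ExecutionException e) {
            // the position in the result list identifies the failed table
            failures.add(tables.get(i));
          }
        }
        pool.shutdown();
        if (!failures.isEmpty()) {
          // analogous to throwing MultiIndexWriteFailureException(failures)
          System.out.println("failed index tables: " + failures);
        }
      }
    }

In the committer itself the pool is sized via index.trackingwriter.threads.max (default 10) with a keep-alive from index.trackingwriter.threads.keepalivetime, and the per-task stop/abort/interrupt checks in throwFailureIfDone provide the cooperative cancellation described in the comment above.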

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
index 5a57f0b..1c45cd3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 
-import org.apache.hadoop.hbase.index.covered.IndexCodec;
+import org.apache.phoenix.hbase.index.covered.IndexCodec;
 
 /**
  *

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index f44e4fa..3b28382 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -35,14 +35,14 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.index.ValueGetter;
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.client.KeyValueBuilder;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index 6f2290d..6a0aae3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.index.covered.CoveredColumnsIndexBuilder;
-import org.apache.hadoop.hbase.index.util.IndexManagementUtil;
 import org.apache.phoenix.compile.ScanRanges;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
+import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.util.SchemaUtil;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index 02bb066..762ca7c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -32,14 +32,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.index.ValueGetter;
-import org.apache.hadoop.hbase.index.covered.IndexCodec;
-import org.apache.hadoop.hbase.index.covered.IndexUpdate;
-import org.apache.hadoop.hbase.index.covered.TableState;
-import org.apache.hadoop.hbase.index.scanner.Scanner;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-import org.apache.hadoop.hbase.index.util.IndexManagementUtil;
-import org.apache.hadoop.hbase.index.write.IndexWriter;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.IndexMetaDataCache;
 import org.apache.phoenix.cache.ServerCacheClient;
@@ -47,6 +39,14 @@ import org.apache.phoenix.cache.TenantCache;
 import org.apache.phoenix.client.KeyValueBuilder;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.covered.IndexCodec;
+import org.apache.phoenix.hbase.index.covered.IndexUpdate;
+import org.apache.phoenix.hbase.index.covered.TableState;
+import org.apache.phoenix.hbase.index.scanner.Scanner;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
+import org.apache.phoenix.hbase.index.write.IndexWriter;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ServerUtil;
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index f5b89f8..788adbf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -32,11 +32,11 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 
 import com.google.common.collect.Multimap;
-import org.apache.hadoop.hbase.index.table.HTableInterfaceReference;
-import org.apache.hadoop.hbase.index.write.KillServerOnFailurePolicy;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.util.SchemaUtil;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
index 71b7f54..846e4cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
@@ -29,11 +29,11 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.xerial.snappy.Snappy;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.cache.HashCache;
 import org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java b/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
index afdafa1..3386cda 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashJoinInfo.java
@@ -24,9 +24,9 @@ import java.util.List;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.io.WritableUtils;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.parse.JoinTableNode.JoinType;
 import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.PColumn;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 46c0ddc..8b73e49 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -48,8 +48,6 @@ import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.covered.CoveredColumnsIndexBuilder;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -71,6 +69,8 @@ import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.Indexer;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
 import org.apache.phoenix.index.PhoenixIndexBuilder;
 import org.apache.phoenix.index.PhoenixIndexCodec;
 import org.apache.phoenix.jdbc.PhoenixConnection;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 1bd14d3..b62c3e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -74,9 +74,9 @@ import java.math.BigDecimal;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.schema.MetaDataSplitPolicy;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNormalizedName;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
index 7853cc2..361ac36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
@@ -17,7 +17,7 @@
  */
 package org.apache.phoenix.schema;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.ByteUtil;
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
index 8feca89..9d4a48e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
@@ -20,7 +20,7 @@ package org.apache.phoenix.schema;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.http.annotation.Immutable;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 @Immutable
 public class PNameImpl implements PName {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 88de71d..ee95f46 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -38,11 +38,11 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.client.KeyValueBuilder;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
index 02cd70b..fd66d8a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
@@ -27,10 +27,10 @@ import java.util.Arrays;
 import java.util.Comparator;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.SortOrder;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 745f7dd..7075980 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -25,14 +25,14 @@ import java.util.Map;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.index.ValueGetter;
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.client.KeyValueBuilder;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.ColumnFamilyNotFoundException;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 9eb3907..a14e36a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -27,11 +27,11 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.KeyRange;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
index 2c29c20..3ebbeae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.tuple.Tuple;
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
deleted file mode 100644
index f4efe35..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
-import org.apache.hadoop.hbase.regionserver.wal.WALEditCodec;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-
-/**
- * Utility class for testing indexing
- */
-public class IndexTestingUtils {
-
-  private static final Log LOG = LogFactory.getLog(IndexTestingUtils.class);
-  private static final String MASTER_INFO_PORT_KEY = "hbase.master.info.port";
-  private static final String RS_INFO_PORT_KEY = "hbase.regionserver.info.port";
-  
-  private IndexTestingUtils() {
-    // private ctor for util class
-  }
-
-  public static void setupConfig(Configuration conf) {
-      conf.setInt(MASTER_INFO_PORT_KEY, -1);
-      conf.setInt(RS_INFO_PORT_KEY, -1);
-    // setup our codec, so we get proper replay/write
-      conf.set(WALEditCodec.WAL_EDIT_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName());
-  }
-  /**
-   * Verify the state of the index table between the given key and time ranges against the list of
-   * expected keyvalues.
-   * @throws IOException
-   */
-  @SuppressWarnings("javadoc")
-  public static void verifyIndexTableAtTimestamp(HTable index1, List<KeyValue> expected,
-      long start, long end, byte[] startKey, byte[] endKey) throws IOException {
-    LOG.debug("Scanning " + Bytes.toString(index1.getTableName()) + " between times (" + start
-        + ", " + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey)
-        + "].");
-    Scan s = new Scan(startKey, endKey);
-    // s.setRaw(true);
-    s.setMaxVersions();
-    s.setTimeRange(start, end);
-    List<KeyValue> received = new ArrayList<KeyValue>();
-    ResultScanner scanner = index1.getScanner(s);
-    for (Result r : scanner) {
-      received.addAll(r.list());
-      LOG.debug("Received: " + r.list());
-    }
-    scanner.close();
-    assertEquals("Didn't get the expected kvs from the index table!", expected, received);
-  }
-
-  public static void verifyIndexTableAtTimestamp(HTable index1, List<KeyValue> expected, long ts,
-      byte[] startKey) throws IOException {
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, startKey, HConstants.EMPTY_END_ROW);
-  }
-
-  public static void verifyIndexTableAtTimestamp(HTable index1, List<KeyValue> expected, long start,
-      byte[] startKey, byte[] endKey) throws IOException {
-    verifyIndexTableAtTimestamp(index1, expected, start, start + 1, startKey, endKey);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/StubAbortable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/StubAbortable.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/StubAbortable.java
deleted file mode 100644
index b99c36d..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/StubAbortable.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Abortable;
-
-/**
- * Test helper to stub out an {@link Abortable} when needed.
- */
-public class StubAbortable implements Abortable {
-  private static final Log LOG = LogFactory.getLog(StubAbortable.class);
-  private boolean abort;
-
-  @Override
-  public void abort(String reason, Throwable e) {
-    LOG.info("Aborting: " + reason, e);
-    abort = true;
-  }
-
-  @Override
-  public boolean isAborted() {
-    return abort;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TableName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TableName.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TableName.java
deleted file mode 100644
index dc48659..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TableName.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-
-/**
- * Returns a {@code byte[]} containing the name of the currently running test method.
- */
-public class TableName extends TestWatcher {
-  private String tableName;
-
-  /**
-   * Invoked when a test is about to start
-   */
-  @Override
-  protected void starting(Description description) {
-    tableName = description.getMethodName();
-  }
-
-  public byte[] getTableName() {
-    return Bytes.toBytes(tableName);
-  }
-
-  public String getTableNameString() {
-    return this.tableName;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
deleted file mode 100644
index 31fbe49..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.VersionInfo;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.covered.example.ColumnGroup;
-import org.apache.hadoop.hbase.index.covered.example.CoveredColumn;
-import org.apache.hadoop.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-
-/**
- * Test that we correctly fail for versions of HBase that don't support current properties
- */
-public class TestFailForUnsupportedHBaseVersions {
-  private static final Log LOG = LogFactory.getLog(TestFailForUnsupportedHBaseVersions.class);
-
-  /**
-   * We don't support WAL Compression for HBase &lt; 0.94.9, so we shouldn't even allow the server
-   * to start if both indexing and WAL Compression are enabled for the wrong versions.
-   */
-  @Test
-  public void testDoesNotSupportCompressedWAL() {
-    Configuration conf = HBaseConfiguration.create();
-    IndexTestingUtils.setupConfig(conf);
-    // get the current version
-    String version = VersionInfo.getVersion();
-    
-    // ensure WAL Compression not enabled
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, false);
-    
-    //we support all versions without WAL Compression
-    String supported = Indexer.validateVersion(version, conf);
-    assertNull(
-      "WAL Compression wasn't enabled, but version "+version+" of HBase wasn't supported! All versions should"
-          + " support writing without a compressed WAL. Message: "+supported, supported);
-
-    // enable WAL Compression
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-
-    // set the version to something we know isn't supported
-    version = "0.94.4";
-    supported = Indexer.validateVersion(version, conf);
-    assertNotNull("WAL Compression was enabled, but incorrectly marked version as supported",
-      supported);
-    
-    //make sure the first version of 0.94 that supports Indexing + WAL Compression works
-    version = "0.94.9";
-    supported = Indexer.validateVersion(version, conf);
-    assertNull(
-      "WAL Compression wasn't enabled, but version "+version+" of HBase wasn't supported! Message: "+supported, supported);
-    
-    //make sure we support snapshot builds too
-    version = "0.94.9-SNAPSHOT";
-    supported = Indexer.validateVersion(version, conf);
-    assertNull(
-      "WAL Compression wasn't enabled, but version "+version+" of HBase wasn't supported! Message: "+supported, supported);
-  }
-
-  /**
-   * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
- * version. The 'completeness' of this test requires that we run the test with a version of
- * HBase that wouldn't be supported with WAL Compression. Currently, this is the default version
- * (0.94.4) so just running 'mvn test' will run the full test. However, this test will not fail
- * when running against a version of HBase with WAL Compression enabled. Therefore, to fully test
- * this functionality, we need to run the test against both a supported and an unsupported version
- * of HBase (as long as we want to support a version of HBase that doesn't support custom WAL
-   * Codecs).
-   * @throws Exception on failure
-   */
-  @Test(timeout = 300000 /* 5 mins */)
-  public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    IndexTestingUtils.setupConfig(conf);
-    // enable WAL Compression
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-
-    // check the version to see if it isn't supported
-    String version = VersionInfo.getVersion();
-    boolean supported = false;
-    if (Indexer.validateVersion(version, conf) == null) {
-      supported = true;
-    }
-
-    // start the minicluster
-    HBaseTestingUtility util = new HBaseTestingUtility(conf);
-    util.startMiniCluster();
-
-    // setup the primary table
-    HTableDescriptor desc = new HTableDescriptor(
-        "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
-    byte[] family = Bytes.toBytes("f");
-    desc.addFamily(new HColumnDescriptor(family));
-
-    // enable indexing to a non-existent index table
-    String indexTableName = "INDEX_TABLE";
-    ColumnGroup fam1 = new ColumnGroup(indexTableName);
-    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    builder.addIndexGroup(fam1);
-    builder.build(desc);
-
-    // get a reference to the regionserver, so we can ensure it aborts
-    HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);
-
-    // create the primary table
-    HBaseAdmin admin = util.getHBaseAdmin();
-    if (supported) {
-      admin.createTable(desc);
-      assertFalse("Hosting regeion server failed, even the HBase version (" + version
-          + ") supports WAL Compression.", server.isAborted());
-    } else {
-      admin.createTableAsync(desc, null);
-
-      // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
-      // broken.
-      while (!server.isAborted()) {
-        LOG.debug("Waiting on regionserver to abort..");
-      }
-    }
-
-    // cleanup
-    util.shutdownMiniCluster();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/CoveredIndexCodecForTesting.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/CoveredIndexCodecForTesting.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/CoveredIndexCodecForTesting.java
deleted file mode 100644
index ccbd202..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/CoveredIndexCodecForTesting.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-
-import org.apache.phoenix.index.BaseIndexCodec;
-
-/**
- * An {@link IndexCodec} for testing that allows you to specify the index updates/deletes,
- * regardless of the current table's state.
- */
-public class CoveredIndexCodecForTesting extends BaseIndexCodec {
-
-  private List<IndexUpdate> deletes = new ArrayList<IndexUpdate>();
-  private List<IndexUpdate> updates = new ArrayList<IndexUpdate>();
-
-  public void addIndexDelete(IndexUpdate... deletes) {
-    this.deletes.addAll(Arrays.asList(deletes));
-  }
-  
-  public void addIndexUpserts(IndexUpdate... updates) {
-    this.updates.addAll(Arrays.asList(updates));
-  }
-
-  public void clear() {
-    this.deletes.clear();
-    this.updates.clear();
-  }
-  
-  @Override
-  public Iterable<IndexUpdate> getIndexDeletes(TableState state) {
-    return this.deletes;
-  }
-
-  @Override
-  public Iterable<IndexUpdate> getIndexUpserts(TableState state) {
-    return this.updates;
-  }
-
-  @Override
-  public void initialize(RegionCoprocessorEnvironment env) throws IOException {
-    // noop
-  }
-
-  @Override
-  public boolean isEnabled(Mutation m) {
-    return true;
-  }
-}
\ No newline at end of file
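
Note: the canned codec deleted above is driven by priming it with updates and reading them back; since it ignores the passed TableState, the test fully controls what the index builder sees. A minimal sketch of that pattern, assuming only the methods shown in the deleted file (the IndexUpdate passed in would be built elsewhere in the test):

    static void primeAndVerify(CoveredIndexCodecForTesting codec, IndexUpdate expected) {
      codec.clear();                   // reset any state left over from a previous case
      codec.addIndexUpserts(expected); // canned update, independent of actual table state
      // the codec ignores its TableState argument, so null is safe in a unit test
      for (IndexUpdate update : codec.getIndexUpserts(null)) {
        assert update == expected;
      }
    }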

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestCoveredColumns.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestCoveredColumns.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestCoveredColumns.java
deleted file mode 100644
index 5af0ad3..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestCoveredColumns.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Arrays;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-
-public class TestCoveredColumns {
-
-  private static final byte[] fam = Bytes.toBytes("fam");
-  private static final byte[] qual = Bytes.toBytes("qual");
-
-  @Test
-  public void testCovering() {
-    ColumnReference ref = new ColumnReference(fam, qual);
-    CoveredColumns columns = new CoveredColumns();
-    assertEquals("Should have only found a single column to cover", 1, columns
-        .findNonCoveredColumns(Arrays.asList(ref)).size());
-
-    columns.addColumn(ref);
-    assertEquals("Shouldn't have any columns to cover", 0,
-      columns.findNonCoveredColumns(Arrays.asList(ref)).size());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
deleted file mode 100644
index 026937f..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdge;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.TableName;
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.scanner.Scanner;
-
-/**
- * End-to-End test of just the {@link CoveredColumnsIndexBuilder}, but with a simple
- * {@link IndexCodec} and BatchCache implementation.
- */
-public class TestEndToEndCoveredColumnsIndexBuilder {
-
-  public class TestState {
-
-    private HTable table;
-    private long ts;
-    private VerifyingIndexCodec codec;
-
-    /**
-     * @param primary
-     * @param codec
-     * @param ts
-     */
-    public TestState(HTable primary, VerifyingIndexCodec codec, long ts) {
-      this.table = primary;
-      this.ts = ts;
-      this.codec = codec;
-    }
-
-  }
-
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private static final byte[] row = Bytes.toBytes("row");
-  private static final byte[] family = Bytes.toBytes("FAM");
-  private static final byte[] qual = Bytes.toBytes("qual");
-  private static final HColumnDescriptor FAM1 = new HColumnDescriptor(family);
-
-  @Rule
-  public TableName TestTable = new TableName();
-
-  private TestState state;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    // disable version checking, so we can test against whatever version of HBase happens to be
-    // installed (right now, it's generally going to be a SNAPSHOT version).
-    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    UTIL.startMiniCluster();
-  }
-
-  @AfterClass
-  public static void shutdownCluster() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    this.state = setupTest(TestTable.getTableNameString());
-  }
-    
-  private interface TableStateVerifier {
-
-    /**
-     * Verify that the state of the table is correct. Should fail the unit test if it isn't as
-     * expected.
-     * @param state
-     */
-    public void verify(TableState state);
-
-  }
-
-  /**
-   * {@link TableStateVerifier} that ensures the kvs returned from the table match the passed
-   * {@link KeyValue}s when querying on the given columns.
-   */
-  private class ListMatchingVerifier implements TableStateVerifier {
-
-    private List<KeyValue> expectedKvs;
-    private ColumnReference[] columns;
-    private String msg;
-
-    public ListMatchingVerifier(String msg, List<KeyValue> kvs, ColumnReference... columns) {
-      this.expectedKvs = kvs;
-      this.columns = columns;
-      this.msg = msg;
-    }
-
-    @Override
-    public void verify(TableState state) {
-      try {
-        Scanner kvs =
-            ((LocalTableState) state).getIndexedColumnsTableState(Arrays.asList(columns)).getFirst();
-
-        int count = 0;
-        KeyValue kv;
-        while ((kv = kvs.next()) != null) {
-          KeyValue next = expectedKvs.get(count++);
-          assertEquals(
-            msg + ": Unexpected kv in table state!\nexpected v1: "
-                + Bytes.toString(next.getValue()) + "\nactual v1: " + Bytes.toString(kv.getValue()),
-            next, kv);
-        }
-
-        assertEquals(msg + ": Didn't find enough kvs in table state!", expectedKvs.size(), count);
-      } catch (IOException e) {
-        fail(msg + ": Got an exception while reading local table state! " + e.getMessage());
-      }
-    }
-  }
-
-  private class VerifyingIndexCodec extends CoveredIndexCodecForTesting {
-
-    private Queue<TableStateVerifier> verifiers = new ArrayDeque<TableStateVerifier>();
-
-    @Override
-    public Iterable<IndexUpdate> getIndexDeletes(TableState state) {
-      verify(state);
-      return super.getIndexDeletes(state);
-    }
-
-    @Override
-    public Iterable<IndexUpdate> getIndexUpserts(TableState state) {
-      verify(state);
-      return super.getIndexUpserts(state);
-    }
-
-    private void verify(TableState state) {
-      TableStateVerifier verifier = verifiers.poll();
-      if (verifier == null) return;
-      verifier.verify(state);
-    }
-  }
-  
-  /**
-   * Test that we see the expected values in a {@link TableState} when doing single puts against a
-   * region.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExpectedResultsInTableStateForSinglePut() throws Exception {
-    //just do a simple Put to start with
-    long ts = state.ts;
-    Put p = new Put(row, ts);
-    p.add(family, qual, Bytes.toBytes("v1"));
-    
-    // get all the underlying kvs for the put
-    final List<KeyValue> expectedKvs = new ArrayList<KeyValue>();
-    final List<KeyValue> allKvs = new ArrayList<KeyValue>();
-    allKvs.addAll(p.getFamilyMap().get(family));
-
-    // setup the verifier for the data we expect to write
-    // first call shouldn't have anything in the table
-    final ColumnReference familyRef =
-        new ColumnReference(TestEndToEndCoveredColumnsIndexBuilder.family, ColumnReference.ALL_QUALIFIERS);
-
-    VerifyingIndexCodec codec = state.codec;
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", expectedKvs, familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 1", allKvs, familyRef));
-
-    // do the actual put (no indexing will actually be done)
-    HTable primary = state.table;
-    primary.put(p);
-    primary.flushCommits();
-
-    // now we do another put to the same row. We should see just the old row state, followed by the
-    // new + old
-    p = new Put(row, ts + 1);
-    p.add(family, qual, Bytes.toBytes("v2"));
-    expectedKvs.addAll(allKvs);
-    // add them first because their ts is newer
-    allKvs.addAll(0, p.get(family, qual));
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", expectedKvs, familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));
-    
-    // do the actual put
-    primary.put(p);
-    primary.flushCommits();
-
-    // cleanup after ourselves
-    cleanup(state);
-  }
-
-  /**
-   * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
- * Previous implementations managed batches by playing the current state against each element in the
-   * batch, rather than combining all the per-row updates into a single mutation for the batch. This
-   * test ensures that we see the correct expected state.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
-    long ts = state.ts;
-    // build up a list of puts to make, all on the same row
-    Put p1 = new Put(row, ts);
-    p1.add(family, qual, Bytes.toBytes("v1"));
-    Put p2 = new Put(row, ts + 1);
-    p2.add(family, qual, Bytes.toBytes("v2"));
-
-    // setup all the verifiers we need. This is just the same as above, but will be called twice
-    // since we need to iterate the batch.
-
-    // get all the underlying kvs for the put
-    final List<KeyValue> allKvs = new ArrayList<KeyValue>(2);
-    allKvs.addAll(p2.getFamilyMap().get(family));
-    allKvs.addAll(p1.getFamilyMap().get(family));
-
-    // setup the verifier for the data we expect to write
-    // both puts should be put into a single batch
-    final ColumnReference familyRef =
-        new ColumnReference(TestEndToEndCoveredColumnsIndexBuilder.family, ColumnReference.ALL_QUALIFIERS);
-    VerifyingIndexCodec codec = state.codec;
-    // no previous state in the table
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", Collections
-        .<KeyValue> emptyList(), familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyMap().get(family),
-        familyRef));
-
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyMap().get(family),
-        familyRef));
-    // kvs from both puts should be in the table now
-    codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));
-
-    // do the actual put (no indexing will actually be done)
-    HTable primary = state.table;
-    primary.setAutoFlush(false);
-    primary.put(Arrays.asList(p1, p2));
-    primary.flushCommits();
-
-    // cleanup after ourselves
-    cleanup(state);
-  }
-
-  /**
-   * @param tableName name of the table to create for the test
-   * @return the supporting state for the test
-   */
-  private TestState setupTest(String tableName) throws IOException {
-    byte[] tableNameBytes = Bytes.toBytes(tableName);
-    HTableDescriptor desc = new HTableDescriptor(tableNameBytes);
-    desc.addFamily(FAM1);
-    // add the necessary simple options to create the builder
-    Map<String, String> indexerOpts = new HashMap<String, String>();
-    // just need to set the codec - we overwrite it with a verifying codec later, but we need
-    // something here or the initializer blows up.
-    indexerOpts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY,
-      CoveredIndexCodecForTesting.class.getName());
-    Indexer.enableIndexing(desc, CoveredColumnsIndexBuilder.class, indexerOpts);
-
-    // create the table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    admin.createTable(desc);
-    HTable primary = new HTable(UTIL.getConfiguration(), tableNameBytes);
-
-    // overwrite the codec so we can verify the current state
-    HRegion region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
-    Indexer indexer =
-        (Indexer) region.getCoprocessorHost().findCoprocessor(Indexer.class.getName());
-    CoveredColumnsIndexBuilder builder =
-        (CoveredColumnsIndexBuilder) indexer.getBuilderForTesting();
-    VerifyingIndexCodec codec = new VerifyingIndexCodec();
-    builder.setIndexCodecForTesting(codec);
-
-    // setup the Puts we want to write
-    final long ts = System.currentTimeMillis();
-    EnvironmentEdge edge = new EnvironmentEdge() {
-
-      @Override
-      public long currentTimeMillis() {
-        return ts;
-      }
-    };
-    EnvironmentEdgeManager.injectEdge(edge);
-
-    return new TestState(primary, codec, ts);
-  }
-
-  /**
-   * Cleanup the test based on the passed state.
-   * @param state
-   */
-  private void cleanup(TestState state) throws IOException {
-    EnvironmentEdgeManager.reset();
-    state.table.close();
-    UTIL.deleteTable(state.table.getTableName());
-  }
-}
\ No newline at end of file
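
Note: setupTest() above pins HBase's notion of "now" by injecting a fixed EnvironmentEdge, and cleanup() must call EnvironmentEdgeManager.reset() or later tests inherit the frozen clock. A minimal sketch of that pattern, using only the HBase utility classes imported in the deleted file:

    final long fixedTs = System.currentTimeMillis();
    EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
      @Override
      public long currentTimeMillis() {
        return fixedTs; // every HBase-internal "now" lookup sees the same instant
      }
    });
    try {
      // run test logic that stamps cells with the current time
    } finally {
      EnvironmentEdgeManager.reset(); // restore the real clock for other tests
    }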

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
deleted file mode 100644
index e20b8bb..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import org.apache.hadoop.hbase.index.covered.data.LocalHBaseState;
-import org.apache.hadoop.hbase.index.covered.data.LocalTable;
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.scanner.Scanner;
-
-/**
- * Tests for {@link LocalTableState}: pending-update ordering, rollback, and column caching.
- */
-public class TestLocalTableState {
-
-  private static final byte[] row = Bytes.toBytes("row");
-  private static final byte[] fam = Bytes.toBytes("fam");
-  private static final byte[] qual = Bytes.toBytes("qual");
-  private static final byte[] val = Bytes.toBytes("val");
-  private static final long ts = 10;
-
-  @SuppressWarnings("unchecked")
-  @Test
-  public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
-    Put m = new Put(row);
-    m.add(fam, qual, ts, val);
-    // setup mocks
-    Configuration conf = new Configuration(false);
-    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
-    Mockito.when(env.getConfiguration()).thenReturn(conf);
-
-    HRegion region = Mockito.mock(HRegion.class);
-    Mockito.when(env.getRegion()).thenReturn(region);
-    RegionScanner scanner = Mockito.mock(RegionScanner.class);
-    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
-    final byte[] stored = Bytes.toBytes("stored-value");
-    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
-      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
-        List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
-        KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
-        kv.setMemstoreTS(0);
-        list.add(kv);
-        return false;
-      }
-    });
-
-
-    LocalHBaseState state = new LocalTable(env);
-    LocalTableState table = new LocalTableState(env, state, m);
-    //add the kvs from the mutation
-    table.addPendingUpdates(m.get(fam, qual));
-
-    // setup the lookup
-    ColumnReference col = new ColumnReference(fam, qual);
-    table.setCurrentTimestamp(ts);
-    //check that our value still shows up first on scan, even though this is a lazy load
-    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col));
-    Scanner s = p.getFirst();
-    assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next());
-  }
-
-  /**
- * Test that we correctly roll back the state of a keyvalue.
-   * @throws Exception
-   */
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testCorrectRollback() throws Exception {
-    Put m = new Put(row);
-    m.add(fam, qual, ts, val);
-    // setup mocks
-    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
-
-    HRegion region = Mockito.mock(HRegion.class);
-    Mockito.when(env.getRegion()).thenReturn(region);
-    RegionScanner scanner = Mockito.mock(RegionScanner.class);
-    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
-    final byte[] stored = Bytes.toBytes("stored-value");
-    final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
-    storedKv.setMemstoreTS(2);
-    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
-      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
-        List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
-
-        list.add(storedKv);
-        return false;
-      }
-    });
-    LocalHBaseState state = new LocalTable(env);
-    LocalTableState table = new LocalTableState(env, state, m);
-    // add the kvs from the mutation
-    KeyValue kv = m.get(fam, qual).get(0);
-    kv.setMemstoreTS(0);
-    table.addPendingUpdates(kv);
-
-    // setup the lookup
-    ColumnReference col = new ColumnReference(fam, qual);
-    table.setCurrentTimestamp(ts);
-    // check that the value is there
-    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col));
-    Scanner s = p.getFirst();
-    assertEquals("Didn't get the pending mutation's value first", kv, s.next());
-
-    // rollback that value
-    table.rollback(Arrays.asList(kv));
-    p = table.getIndexedColumnsTableState(Arrays.asList(col));
-    s = p.getFirst();
-    assertEquals("Didn't correctly rollback the row - still found it!", null, s.next());
-    Mockito.verify(env, Mockito.times(1)).getRegion();
-    Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
-  }
-
-  @SuppressWarnings("unchecked")
-  @Test
-  public void testOnlyLoadsRequestedColumns() throws Exception {
-    // setup mocks
-    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
-
-    HRegion region = Mockito.mock(HRegion.class);
-    Mockito.when(env.getRegion()).thenReturn(region);
-    RegionScanner scanner = Mockito.mock(RegionScanner.class);
-    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
-    final KeyValue storedKv =
-        new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
-    storedKv.setMemstoreTS(2);
-    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
-      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
-        List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
-
-        list.add(storedKv);
-        return false;
-      }
-    });
-    LocalHBaseState state = new LocalTable(env);
-    Put pendingUpdate = new Put(row);
-    pendingUpdate.add(fam, qual, ts, val);
-    LocalTableState table = new LocalTableState(env, state, pendingUpdate);
-
-    // do the lookup for the given column
-    ColumnReference col = new ColumnReference(fam, qual);
-    table.setCurrentTimestamp(ts);
-    // check that the value is there
-    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col));
-    Scanner s = p.getFirst();
-    // make sure it read the table the one time
-    assertEquals("Didn't get the stored keyvalue!", storedKv, s.next());
-
-    // on the second lookup it shouldn't access the underlying table again - the cached columns
-    // should know they are done
-    p = table.getIndexedColumnsTableState(Arrays.asList(col));
-    s = p.getFirst();
-    assertEquals("Lost already loaded update!", storedKv, s.next());
-    Mockito.verify(env, Mockito.times(1)).getRegion();
-    Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
-  }
-
-  // TODO add test here for making sure multiple column references with the same column family don't
-  // cause an infinite loop
-}
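
Note: all three tests above share one stubbing pattern worth calling out: RegionScanner.next(List) is mocked with an Answer that fills the caller-supplied list and returns false to signal the scanner is exhausted. A condensed sketch of just that stub, using only the Mockito and HBase types imported in the deleted file (storedKv stands in for whatever canned row the test defines):

    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
      @Override
      public Boolean answer(InvocationOnMock invocation) throws Throwable {
        @SuppressWarnings("unchecked")
        List<KeyValue> results = (List<KeyValue>) invocation.getArguments()[0];
        results.add(storedKv); // hand back the canned row
        return false;          // false == no more rows after this batch
      }
    });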

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
deleted file mode 100644
index ad9293e..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.data;
-
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-public class TestIndexMemStore {
-
-  private static final byte[] row = Bytes.toBytes("row");
-  private static final byte[] family = Bytes.toBytes("family");
-  private static final byte[] qual = Bytes.toBytes("qual");
-  private static final byte[] val = Bytes.toBytes("val");
-  private static final byte[] val2 = Bytes.toBytes("val2");
-
-  @Test
-  public void testCorrectOverwritting() throws Exception {
-    IndexMemStore store = new IndexMemStore(IndexMemStore.COMPARATOR);
-    long ts = 10;
-    KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val);
-    kv.setMemstoreTS(2);
-    KeyValue kv2 = new KeyValue(row, family, qual, ts, Type.Put, val2);
-    kv2.setMemstoreTS(0);
-    store.add(kv, true);
-    // adding the exact same kv shouldn't change anything stored if not overwriting
-    store.add(kv2, false);
-    KeyValueScanner scanner = store.getScanner();
-    KeyValue first = KeyValue.createFirstOnRow(row);
-    scanner.seek(first);
-    assertTrue("Overwrote kv when specifically not!", kv == scanner.next());
-    scanner.close();
-
-    // now when we overwrite, we should get the newer one
-    store.add(kv2, true);
-    scanner = store.getScanner();
-    scanner.seek(first);
-    assertTrue("Didn't overwrite kv when specifically requested!", kv2 == scanner.next());
-    scanner.close();
-  }
-
-  /**
- * We don't expect custom KeyValue creation, so we can't get into weird situations where a
-   * {@link Type#DeleteFamily} has a column qualifier specified.
-   * @throws Exception
-   */
-  @Test
-  public void testExpectedOrdering() throws Exception {
-    IndexMemStore store = new IndexMemStore();
-    KeyValue kv = new KeyValue(row, family, qual, 12, Type.Put, val);
-    store.add(kv, true);
-    KeyValue kv2 = new KeyValue(row, family, qual, 10, Type.Put, val2);
-    store.add(kv2, true);
-    KeyValue df = new KeyValue(row, family, null, 11, Type.DeleteFamily, null);
-    store.add(df, true);
-    KeyValue dc = new KeyValue(row, family, qual, 11, Type.DeleteColumn, null);
-    store.add(dc, true);
-    KeyValue d = new KeyValue(row, family, qual, 12, Type.Delete, null);
-    store.add(d, true);
-
-    // null qualifiers should always sort before the non-null cases
-    KeyValueScanner scanner = store.getScanner();
-    KeyValue first = KeyValue.createFirstOnRow(row);
-    assertTrue("Didn't have any data in the scanner", scanner.seek(first));
-    assertTrue("Didn't get delete family first (no qualifier == sort first)", df == scanner.next());
-    assertTrue("Didn't get point delete before corresponding put", d == scanner.next());
-    assertTrue("Didn't get larger ts Put", kv == scanner.next());
-    assertTrue("Didn't get delete column before corresponding put(delete sorts first)",
-      dc == scanner.next());
-    assertTrue("Didn't get smaller ts Put", kv2 == scanner.next());
-    assertNull("Have more data in the scanner", scanner.next());
-  }
-}
\ No newline at end of file
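
Note: the ordering assertions in testExpectedOrdering() encode the stock KeyValue comparator contract, which can also be checked directly, as in this small sketch (the assumption being that IndexMemStore's default ordering matches KeyValue.COMPARATOR):

    byte[] row = Bytes.toBytes("row");
    byte[] family = Bytes.toBytes("family");
    byte[] qual = Bytes.toBytes("qual");
    // a DeleteFamily has no qualifier, so it sorts before any qualified cell
    KeyValue df = new KeyValue(row, family, null, 11, Type.DeleteFamily, null);
    KeyValue put = new KeyValue(row, family, qual, 11, Type.Put, Bytes.toBytes("val"));
    assert KeyValue.COMPARATOR.compare(df, put) < 0;
    // and at identical coordinates, a delete sorts before the matching put
    KeyValue del = new KeyValue(row, family, qual, 11, Type.Delete, null);
    assert KeyValue.COMPARATOR.compare(del, put) < 0;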

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestColumnTracker.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestColumnTracker.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestColumnTracker.java
deleted file mode 100644
index a349149..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestColumnTracker.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.covered.update.ColumnTracker;
-
-public class TestColumnTracker {
-
-  @Test
-  public void testEnsureGuarranteedMinValid() {
-    assertFalse("Guarranted min wasn't recognized as having newer timestamps!",
-      ColumnTracker.isNewestTime(ColumnTracker.GUARANTEED_NEWER_UPDATES));
-  }
-
-  @Test
-  public void testOnlyKeepsOlderTimestamps() {
-    Collection<ColumnReference> columns = new ArrayList<ColumnReference>();
-    ColumnTracker tracker = new ColumnTracker(columns);
-    tracker.setTs(10);
-    assertEquals("Column tracker didn't set original TS", 10, tracker.getTS());
-    tracker.setTs(12);
-    assertEquals("Column tracker allowed newer timestamp to be set.", 10, tracker.getTS());
-    tracker.setTs(9);
-    assertEquals("Column tracker didn't decrease set timestamp for smaller value", 9,
-      tracker.getTS());
-  }
-
-  @Test
-  public void testHasNewerTimestamps() throws Exception {
-    Collection<ColumnReference> columns = new ArrayList<ColumnReference>();
-    ColumnTracker tracker = new ColumnTracker(columns);
-    assertFalse("Tracker has newer timestamps when no ts set", tracker.hasNewerTimestamps());
-    tracker.setTs(10);
-    assertTrue("Tracker doesn't have newer timetamps with set ts", tracker.hasNewerTimestamps());
-  }
-}
\ No newline at end of file
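
Note: to make the timestamp contract tested above explicit, ColumnTracker.setTs() only ever lowers the stored timestamp, so the tracker remembers the oldest timestamp it has seen. A minimal sketch using only the API exercised by the deleted test:

    ColumnTracker tracker = new ColumnTracker(new ArrayList<ColumnReference>());
    tracker.setTs(10); // first ts is recorded
    tracker.setTs(12); // ignored - newer than the current 10
    tracker.setTs(9);  // kept - older than the current 10
    assert tracker.getTS() == 9;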

