phoenix-commits mailing list archives

From jamestay...@apache.org
Subject [02/41] PHOENIX-130 Separate execution of slow (integration) tests from fast unit tests (Gabriel Reid)
Date Wed, 12 Mar 2014 21:31:29 GMT
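The deletions below are one half of the split named in the subject: tests that need a
mini-cluster are separated from fast unit tests so that only the fast ones run during the
default 'mvn test' (surefire) phase, while the slow ones run in the integration-test phase.
A minimal sketch of that convention follows, assuming JUnit 4 and the Maven failsafe plugin;
the class name, the NeedsOwnMiniClusterTest marker, and the use of @Category are illustrative
assumptions, not details taken from this patch.

import static org.junit.Assert.assertTrue;

import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Hypothetical marker interface for slow tests that need their own mini-cluster. Fast unit
 * tests keep the plain *Test suffix (and no category), so surefire runs them in 'mvn test'.
 */
interface NeedsOwnMiniClusterTest {
}

/**
 * Sketch of an integration test under the new convention: the *IT suffix (and/or the
 * category) routes it to the failsafe plugin, which runs during 'mvn verify' rather than
 * 'mvn test'.
 */
@Category(NeedsOwnMiniClusterTest.class)
public class FailForUnsupportedHBaseVersionsIT {

  @Test
  public void runsOnlyDuringIntegrationTestPhase() {
    // A real test would start an HBaseTestingUtility mini-cluster here; this sketch only
    // demonstrates where a slow test lives after the split.
    assertTrue(true);
  }
}
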
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TestFailForUnsupportedHBaseVersions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TestFailForUnsupportedHBaseVersions.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TestFailForUnsupportedHBaseVersions.java
deleted file mode 100644
index 43ec826..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TestFailForUnsupportedHBaseVersions.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.VersionInfo;
-import org.junit.Test;
-
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-
-/**
- * Test that we correctly fail for versions of HBase that don't support current properties
- */
-public class TestFailForUnsupportedHBaseVersions {
-  private static final Log LOG = LogFactory.getLog(TestFailForUnsupportedHBaseVersions.class);
-
-  /**
-   * We don't support WAL Compression for HBase < 0.94.9, so we shouldn't even allow the server
-   * to start if both indexing and WAL Compression are enabled for the wrong versions.
-   */
-  @Test
-  public void testDoesNotSupportCompressedWAL() {
-    Configuration conf = HBaseConfiguration.create();
-    IndexTestingUtils.setupConfig(conf);
-    // get the current version
-    String version = VersionInfo.getVersion();
-    
-    // ensure WAL Compression not enabled
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, false);
-    
-    //we support all versions without WAL Compression
-    String supported = Indexer.validateVersion(version, conf);
-    assertNull(
-      "WAL Compression wasn't enabled, but version "+version+" of HBase wasn't supported! All versions should"
-          + " support writing without a compressed WAL. Message: "+supported, supported);
-
-    // enable WAL Compression
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-
-    // set the version to something we know isn't supported
-    version = "0.94.4";
-    supported = Indexer.validateVersion(version, conf);
-    assertNotNull("WAL Compression was enabled, but incorrectly marked version as supported",
-      supported);
-    
-    //make sure the first version of 0.94 that supports Indexing + WAL Compression works
-    version = "0.94.9";
-    supported = Indexer.validateVersion(version, conf);
-    assertNull(
-      "WAL Compression wasn't enabled, but version "+version+" of HBase wasn't supported! Message: "+supported, supported);
-    
-    //make sure we support snapshot builds too
-    version = "0.94.9-SNAPSHOT";
-    supported = Indexer.validateVersion(version, conf);
-    assertNull(
-      "WAL Compression wasn't enabled, but version "+version+" of HBase wasn't supported! Message: "+supported, supported);
-  }
-
-  /**
-   * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
-   * version. The 'completeness' of this test requires running it against both a version of HBase
-   * that isn't supported with WAL Compression and one that is. Currently, the default version
-   * (0.94.4) is unsupported, so just running 'mvn test' will run the full test. However, this test
-   * will not fail when running against a version of HBase with WAL Compression enabled. Therefore,
-   * to fully test this functionality, we need to run the test against both a supported and an
-   * unsupported version of HBase (as long as we want to support a version of HBase that doesn't
-   * support custom WAL Codecs).
-   * @throws Exception on failure
-   */
-  @Test(timeout = 300000 /* 5 mins */)
-  public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    IndexTestingUtils.setupConfig(conf);
-    // enable WAL Compression
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-
-    // check the version to see if it isn't supported
-    String version = VersionInfo.getVersion();
-    boolean supported = false;
-    if (Indexer.validateVersion(version, conf) == null) {
-      supported = true;
-    }
-
-    // start the minicluster
-    HBaseTestingUtility util = new HBaseTestingUtility(conf);
-    util.startMiniCluster();
-
-    // setup the primary table
-    HTableDescriptor desc = new HTableDescriptor(
-        "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
-    byte[] family = Bytes.toBytes("f");
-    desc.addFamily(new HColumnDescriptor(family));
-
-    // enable indexing to a non-existent index table
-    String indexTableName = "INDEX_TABLE";
-    ColumnGroup fam1 = new ColumnGroup(indexTableName);
-    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    builder.addIndexGroup(fam1);
-    builder.build(desc);
-
-    // get a reference to the regionserver, so we can ensure it aborts
-    HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);
-
-    // create the primary table
-    HBaseAdmin admin = util.getHBaseAdmin();
-    if (supported) {
-      admin.createTable(desc);
-      assertFalse("Hosting regeion server failed, even the HBase version (" + version
-          + ") supports WAL Compression.", server.isAborted());
-    } else {
-      admin.createTableAsync(desc, null);
-
-      // wait for the regionserver to abort - if this doesn't occur within the timeout, assume it's
-      // broken.
-      while (!server.isAborted()) {
-        LOG.debug("Waiting on regionserver to abort..");
-      }
-    }
-
-    // cleanup
-    util.shutdownMiniCluster();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
deleted file mode 100644
index edcb8d5..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdge;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
-import org.apache.phoenix.hbase.index.covered.IndexCodec;
-import org.apache.phoenix.hbase.index.covered.IndexUpdate;
-import org.apache.phoenix.hbase.index.covered.LocalTableState;
-import org.apache.phoenix.hbase.index.covered.TableState;
-import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
-import org.apache.phoenix.hbase.index.scanner.Scanner;
-
-/**
- * End-to-End test of just the {@link CoveredColumnsIndexBuilder}, but with a simple
- * {@link IndexCodec} and BatchCache implementation.
- */
-public class TestEndToEndCoveredColumnsIndexBuilder {
-
-  public class TestState {
-
-    private HTable table;
-    private long ts;
-    private VerifyingIndexCodec codec;
-
-    /**
-     * @param primary the primary table being written to
-     * @param codec the verifying codec installed on the primary table's region
-     * @param ts the timestamp to use for the test puts
-     */
-    public TestState(HTable primary, VerifyingIndexCodec codec, long ts) {
-      this.table = primary;
-      this.ts = ts;
-      this.codec = codec;
-    }
-
-  }
-
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private static final byte[] row = Bytes.toBytes("row");
-  private static final byte[] family = Bytes.toBytes("FAM");
-  private static final byte[] qual = Bytes.toBytes("qual");
-  private static final HColumnDescriptor FAM1 = new HColumnDescriptor(family);
-
-  @Rule
-  public TableName TestTable = new TableName();
-
-  private TestState state;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    // disable version checking, so we can test against whatever version of HBase happens to be
-    // installed (right now, it's generally going to be a SNAPSHOT version).
-    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    UTIL.startMiniCluster();
-  }
-
-  @AfterClass
-  public static void shutdownCluster() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    this.state = setupTest(TestTable.getTableNameString());
-  }
-    
-  private interface TableStateVerifier {
-
-    /**
-     * Verify that the state of the table is correct. Should fail the unit test if it isn't as
-     * expected.
-     * @param state the current table state to verify
-     */
-    public void verify(TableState state);
-
-  }
-
-  /**
-   * {@link TableStateVerifier} that ensures the kvs returned from the table match the passed
-   * {@link KeyValue}s when querying on the given columns.
-   */
-  private class ListMatchingVerifier implements TableStateVerifier {
-
-    private List<KeyValue> expectedKvs;
-    private ColumnReference[] columns;
-    private String msg;
-
-    public ListMatchingVerifier(String msg, List<KeyValue> kvs, ColumnReference... columns) {
-      this.expectedKvs = kvs;
-      this.columns = columns;
-      this.msg = msg;
-    }
-
-    @Override
-    public void verify(TableState state) {
-      try {
-        Scanner kvs =
-            ((LocalTableState) state).getIndexedColumnsTableState(Arrays.asList(columns)).getFirst();
-
-        int count = 0;
-        KeyValue kv;
-        while ((kv = kvs.next()) != null) {
-          KeyValue next = expectedKvs.get(count++);
-          assertEquals(
-            msg + ": Unexpected kv in table state!\nexpected v1: "
-                + Bytes.toString(next.getValue()) + "\nactual v1:" + Bytes.toString(kv.getValue()),
-            next, kv);
-        }
-
-        assertEquals(msg + ": Didn't find enough kvs in table state!", expectedKvs.size(), count);
-      } catch (IOException e) {
-        fail(msg + ": Got an exception while reading local table state! " + e.getMessage());
-      }
-    }
-  }
-
-  private class VerifyingIndexCodec extends CoveredIndexCodecForTesting {
-
-    private Queue<TableStateVerifier> verifiers = new ArrayDeque<TableStateVerifier>();
-
-    @Override
-    public Iterable<IndexUpdate> getIndexDeletes(TableState state) {
-      verify(state);
-      return super.getIndexDeletes(state);
-    }
-
-    @Override
-    public Iterable<IndexUpdate> getIndexUpserts(TableState state) {
-      verify(state);
-      return super.getIndexUpserts(state);
-    }
-
-    private void verify(TableState state) {
-      TableStateVerifier verifier = verifiers.poll();
-      if (verifier == null) return;
-      verifier.verify(state);
-    }
-  }
-  
-  /**
-   * Test that we see the expected values in a {@link TableState} when doing single puts against a
-   * region.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExpectedResultsInTableStateForSinglePut() throws Exception {
-    //just do a simple Put to start with
-    long ts = state.ts;
-    Put p = new Put(row, ts);
-    p.add(family, qual, Bytes.toBytes("v1"));
-    
-    // get all the underlying kvs for the put
-    final List<KeyValue> expectedKvs = new ArrayList<KeyValue>();
-    final List<KeyValue> allKvs = new ArrayList<KeyValue>();
-    allKvs.addAll(p.getFamilyMap().get(family));
-
-    // setup the verifier for the data we expect to write
-    // first call shouldn't have anything in the table
-    final ColumnReference familyRef =
-        new ColumnReference(TestEndToEndCoveredColumnsIndexBuilder.family, ColumnReference.ALL_QUALIFIERS);
-
-    VerifyingIndexCodec codec = state.codec;
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", expectedKvs, familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 1", allKvs, familyRef));
-
-    // do the actual put (no indexing will actually be done)
-    HTable primary = state.table;
-    primary.put(p);
-    primary.flushCommits();
-
-    // now we do another put to the same row. We should see just the old row state, followed by the
-    // new + old
-    p = new Put(row, ts + 1);
-    p.add(family, qual, Bytes.toBytes("v2"));
-    expectedKvs.addAll(allKvs);
-    // add them first b/c the ts is newer
-    allKvs.addAll(0, p.get(family, qual));
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", expectedKvs, familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));
-    
-    // do the actual put
-    primary.put(p);
-    primary.flushCommits();
-
-    // cleanup after ourselves
-    cleanup(state);
-  }
-
-  /**
-   * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
-   * Previous implementations managed batches by playing current state against each element in the
-   * batch, rather than combining all the per-row updates into a single mutation for the batch. This
-   * test ensures that we see the correct expected state.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
-    long ts = state.ts;
-    // build up a list of puts to make, all on the same row
-    Put p1 = new Put(row, ts);
-    p1.add(family, qual, Bytes.toBytes("v1"));
-    Put p2 = new Put(row, ts + 1);
-    p2.add(family, qual, Bytes.toBytes("v2"));
-
-    // setup all the verifiers we need. This is just the same as above, but will be called twice
-    // since we need to iterate the batch.
-
-    // get all the underlying kvs for the put
-    final List<KeyValue> allKvs = new ArrayList<KeyValue>(2);
-    allKvs.addAll(p2.getFamilyMap().get(family));
-    allKvs.addAll(p1.getFamilyMap().get(family));
-
-    // setup the verifier for the data we expect to write
-    // both puts should be put into a single batch
-    final ColumnReference familyRef =
-        new ColumnReference(TestEndToEndCoveredColumnsIndexBuilder.family, ColumnReference.ALL_QUALIFIERS);
-    VerifyingIndexCodec codec = state.codec;
-    // no previous state in the table
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", Collections
-        .<KeyValue> emptyList(), familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyMap().get(family),
-        familyRef));
-
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyMap().get(family),
-        familyRef));
-    // kvs from both puts should be in the table now
-    codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));
-
-    // do the actual put (no indexing will actually be done)
-    HTable primary = state.table;
-    primary.setAutoFlush(false);
-    primary.put(Arrays.asList(p1, p2));
-    primary.flushCommits();
-
-    // cleanup after ourselves
-    cleanup(state);
-  }
-
-  /**
-   * @param tableName name of the table to create for the test
-   * @return the supporting state for the test
-   */
-  private TestState setupTest(String tableName) throws IOException {
-    byte[] tableNameBytes = Bytes.toBytes(tableName);
-    HTableDescriptor desc = new HTableDescriptor(tableNameBytes);
-    desc.addFamily(FAM1);
-    // add the necessary simple options to create the builder
-    Map<String, String> indexerOpts = new HashMap<String, String>();
-    // just need to set the codec - we are going to set it later, but we need something here or the
-    // initializer blows up.
-    indexerOpts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY,
-      CoveredIndexCodecForTesting.class.getName());
-    Indexer.enableIndexing(desc, CoveredColumnsIndexBuilder.class, indexerOpts);
-
-    // create the table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    admin.createTable(desc);
-    HTable primary = new HTable(UTIL.getConfiguration(), tableNameBytes);
-
-    // overwrite the codec so we can verify the current state
-    HRegion region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
-    Indexer indexer =
-        (Indexer) region.getCoprocessorHost().findCoprocessor(Indexer.class.getName());
-    CoveredColumnsIndexBuilder builder =
-        (CoveredColumnsIndexBuilder) indexer.getBuilderForTesting();
-    VerifyingIndexCodec codec = new VerifyingIndexCodec();
-    builder.setIndexCodecForTesting(codec);
-
-    // setup the Puts we want to write
-    final long ts = System.currentTimeMillis();
-    EnvironmentEdge edge = new EnvironmentEdge() {
-
-      @Override
-      public long currentTimeMillis() {
-        return ts;
-      }
-    };
-    EnvironmentEdgeManager.injectEdge(edge);
-
-    return new TestState(primary, codec, ts);
-  }
-
-  /**
-   * Cleanup the test based on the passed state.
-   * @param state the test state to clean up
-   */
-  private void cleanup(TestState state) throws IOException {
-    EnvironmentEdgeManager.reset();
-    state.table.close();
-    UTIL.deleteTable(state.table.getTableName());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndToEndCoveredIndexing.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
deleted file mode 100644
index 5ac1ce9..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
+++ /dev/null
@@ -1,882 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexCodec;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexer;
-
-/**
- * Test Covered Column indexing in an 'end-to-end' manner on a minicluster. This covers cases where
- * we manage custom timestamped updates that arrive in and out of order, as well as cases that just
- * use generically timestamped updates.
- */
-public class TestEndToEndCoveredIndexing {
-  private static final Log LOG = LogFactory.getLog(TestEndToEndCoveredIndexing.class);
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final String FAM_STRING = "FAMILY";
-  private static final byte[] FAM = Bytes.toBytes(FAM_STRING);
-  private static final String FAM2_STRING = "FAMILY2";
-  private static final byte[] FAM2 = Bytes.toBytes(FAM2_STRING);
-  private static final byte[] EMPTY_BYTES = new byte[0];
-  private static final byte[] indexed_qualifer = Bytes.toBytes("indexed_qual");
-  private static final byte[] regular_qualifer = Bytes.toBytes("reg_qual");
-  private static final byte[] row1 = Bytes.toBytes("row1");
-  private static final byte[] value1 = Bytes.toBytes("val1");
-  private static final byte[] value2 = Bytes.toBytes("val2");
-  private static final byte[] value3 = Bytes.toBytes("val3");
-  // match a single family:qualifier pair
-  private static final CoveredColumn col1 = new CoveredColumn(FAM_STRING, indexed_qualifer);
-  // matches the family2:* columns
-  private static final CoveredColumn col2 = new CoveredColumn(FAM2_STRING, null);
-  private static final CoveredColumn col3 = new CoveredColumn(FAM2_STRING, indexed_qualifer);
-  
-  @Rule
-  public TableName TestTable = new TableName();
-  
-  private ColumnGroup fam1;
-  private ColumnGroup fam2;
-
-  // setup a couple of index columns
-  private void setupColumns() {
-    fam1 = new ColumnGroup(getIndexTableName());
-    fam2 = new ColumnGroup(getIndexTableName() + "2");
-    // values are [col1][col2_1]...[col2_n]
-    fam1.add(col1);
-    fam1.add(col2);
-    // value is [col2]
-    fam2.add(col3);
-  }
-
-  private String getIndexTableName() {
-    return Bytes.toString(TestTable.getTableName()) + "_index";
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    // disable version checking, so we can test against whatever version of HBase happens to be
-    // installed (right now, it's generally going to be a SNAPSHOT version).
-    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    UTIL.startMiniCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    setupColumns();
-  }
-
-  /**
-   * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
-   * entries as expected
-   * @throws Exception on failure
-   */
-  @Test
-  public void testSimpleTimestampedUpdates() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 10;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-
-    // verify that the index matches
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Test that multiple timestamps in a single put build the correct index updates.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testMultipleTimestampsInSinglePut() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 10;
-    long ts2 = 11;
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    p.add(FAM, regular_qualifer, ts1, value2);
-    // our group indexes all columns in this family, so any qualifier here is ok
-    p.add(FAM2, regular_qualifer, ts2, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts1
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-    // check the second entry at ts2
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Test that we make updates to multiple {@link ColumnGroup}s across a single put/delete 
-   * @throws Exception on failure
-   */
-  @Test
-  public void testMultipleConcurrentGroupsUpdated() throws Exception {
-    HTable primary = createSetupTables(fam1, fam2);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 10;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    p.add(FAM2, indexed_qualifer, ts, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-    HTable index2 = new HTable(UTIL.getConfiguration(), fam2.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // and check the second index as well
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index2, expected, ts, value3);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1, index2);
-  }
-
-  /**
-   * HBase has a 'fun' property wherein you can completely clobber an existing row if you make a
-   * {@link Put} at the exact same dimension (row, cf, cq, ts) as an existing row. The old row
-   * disappears and the new value (since the rest of the row is the same) completely subsumes it.
-   * This test ensures that we remove the old entry and put a new entry in its place.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testOverwritingPutsCorrectlyGetIndexed() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 10;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // now overwrite the put in the primary table with a new value
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value3);
-    // and verify that a scan at the first entry returns nothing (ignore the updated row)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts,
-      value1, value2);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-  
-  @Test
-  public void testSimpleDeletes() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a simple Put
-    long ts = 10;
-    Put p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    Delete d = new Delete(row1);
-    primary.delete(d);
-
-    HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
-    List<KeyValue> expected = Collections.<KeyValue> emptyList();
-    // scan over all time should cause the delete to be covered
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
-      HConstants.EMPTY_END_ROW);
-
-    // scan at the older timestamp should still show the older value
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(index, primary);
-  }
-
-  /**
-   * If we don't have any updates to make to the index, we don't take a lock on the WAL. However, we
-   * need to make sure that we don't try to unlock the WAL at write time when we don't write
-   * anything, since that would cause a java.lang.IllegalMonitorStateException.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testDeletesWithoutPreviousState() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a delete on the primary table (no data, so no index updates...hopefully).
-    long ts = 10;
-    Delete d = new Delete(row1);
-    primary.delete(d);
-
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-    List<KeyValue> expected = Collections.<KeyValue> emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // a delete of a specific family/column should also not show any index updates
-    d = new Delete(row1);
-    d.deleteColumn(FAM, indexed_qualifer);
-    primary.delete(d);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // also just a family marker should have the same effect
-    d = new Delete(row1);
-    d.deleteFamily(FAM);
-    primary.delete(d);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // a delete of all versions of the column (deleteColumns) should also have the same effect
-    d = new Delete(row1);
-    d.deleteColumns(FAM, indexed_qualifer);
-    primary.delete(d);
-    primary.flushCommits();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Similar to {@link #testMultipleTimestampsInSinglePut()}, this checks the same behavior with deletes.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testMultipleTimestampsInSingleDelete() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 10, ts2 = 11, ts3 = 12;
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    // our group indexes all columns in this family, so any qualifier here is ok
-    p.add(FAM2, regular_qualifer, ts2, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // check to make sure everything we expect is there
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // ts1, we just have v1
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-    // at ts2, don't have the above anymore
-    pairs.clear();
-    expected = Collections.emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);
-    // but we do have the new entry at ts2
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // now build up a delete with a couple different timestamps
-    Delete d = new Delete(row1);
-    // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
-    d.deleteColumn(FAM, indexed_qualifer, ts1);
-    // since this doesn't match exactly, we actually shouldn't see a change in table state
-    d.deleteColumn(FAM2, regular_qualifer, ts3);
-    primary.delete(d);
-
-    // at ts1, we should have the put covered exactly by the delete and into the entire future
-    expected = Collections.emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
-      value1);
-
-    // at ts2, we should just see value3
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // the later delete is a point delete, so we shouldn't see any change at ts3
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
-      HConstants.EMPTY_END_ROW);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
-   * given time. If it's modifying the latest state, we don't need to do anything but add deletes. If
-   * it's modifying back-in-time state, we just need to fix up the surrounding elements, as anything
-   * else ahead of it will be fixed up by later updates.
-   * <p>
-   * similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testDeleteColumnsInThePast() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 10, ts2 = 11, ts3 = 12;
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    p.add(FAM2, regular_qualifer, ts2, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // now build up a delete with a couple different timestamps
-    Delete d = new Delete(row1);
-    // these deletes don't need to match the exact ts because they cover everything earlier
-    d.deleteColumns(FAM, indexed_qualifer, ts2);
-    d.deleteColumns(FAM2, regular_qualifer, ts3);
-    primary.delete(d);
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts1
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-    // delete at ts2 changes what the put would insert
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // final delete clears out everything
-    expected = Collections.emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-  
-  /**
-   * If the client is using custom timestamps, it is possible that the updates come out of order
-   * (i.e. the update to ts 10 comes after the update to ts 12). In that case, we need to be sure
-   * that the index is correctly updated when the out-of-order put arrives.
-   * @throws Exception
-   */
-  @Test
-  public void testOutOfOrderUpdates() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 12;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts
-    List<KeyValue> expectedTs1 = CoveredColumnIndexCodec
-        .getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs1, ts, value1);
-
-    // now make a put back in time
-    long ts2 = ts - 2;
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts2, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check to make sure the back in time entry exists
-    List<KeyValue> expectedTs2 = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2,
-      pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs2, ts2, value2);
-    // then it should be gone at the newer ts (because it deletes itself)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts2,
-      ts + 1, value2, HConstants.EMPTY_END_ROW);
-
-    // but that the original index entry is still visible at ts, just fine
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs1, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * It's possible (e.g. with a fast, frequently writing client) to put more than the 'visible'
-   * number of versions in a row before a client makes a put 'back in time' on that row. If we
-   * don't scan the current table properly, we won't see an index update for that 'back in time'
-   * update, since the usual lookup will only see the regular number of versions. This ability to
-   * see back in time depends on running HBase 0.94.9.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExceedVersionsOutOfOrderPut() throws Exception {
-    // setup the index
-    HTable primary = createSetupTables(fam2);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5;
-    byte[] value4 = Bytes.toBytes("val4");
-    byte[] value5 = Bytes.toBytes("val5");
-    p.add(FAM2, indexed_qualifer, ts1, value1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts3, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts4, value4);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts5, value5);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index = new HTable(UTIL.getConfiguration(), fam2.getTable());
-
-    // do a raw scan of everything in the table
-    if (LOG.isDebugEnabled()) {
-      // the whole table, all the keys
-      Scan s = new Scan();
-      s.setRaw(true);
-      ResultScanner scanner = index.getScanner(s);
-      for (Result r : scanner) {
-        LOG.debug("Found row:" + r);
-      }
-      scanner.close();
-    }
-
-    /*
-     * now we have definitely exceeded the number of versions visible to a usual client of the
-     * primary table, so we should try doing a put 'back in time' and make sure that it has the
-     * correct index values and cleanup
-     */
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts2, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // // read the index for the expected values
-    // HTable index = new HTable(UTIL.getConfiguration(), fam2.getTable());
-    //
-    // do a raw scan of everything in the table
-    if (LOG.isDebugEnabled()) {
-      // the whole table, all the keys
-      Scan s = new Scan();
-      s.setRaw(true);
-      ResultScanner scanner = index.getScanner(s);
-      for (Result r : scanner) {
-        LOG.debug("Found row:" + r);
-      }
-      scanner.close();
-    }
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col3));
-
-    // check the value1 should be present at the earliest timestamp
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts1, value1, value2);
-
-    // and value1 should be removed at ts2 (even though it came later)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts1,
-      ts2 + 1, value1, value2); // timestamp + 1 since its exclusive end timestamp
-
-    // late added column should be there just fine at its timestamp
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts2, value2);
-
-    // and check that the late entry also removes itself at the next timestamp up
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts3,
-      value2, value3);
-
-    // then we should have the rest of the inserts at their appropriate timestamps. Everything else
-    // should be exactly the same, except we shouldn't see ts0 anymore at ts2
-
-    // check the third entry
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts3, value3);
-
-    // check the fourth entry
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value4, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts4, value4);
-
-    // check the back-in-time entry again at ts2
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts2, value2);
-    // verify that we remove the entry, even though it's too far 'back in time'
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts3,
-      value4);
-
-    // cleanup
-    closeAndCleanupTables(primary, index);
-  }
-
-  /**
-   * Similar to {@link #testExceedVersionsOutOfOrderPut()}, but mingles deletes and puts.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExceedVersionsOutOfOrderUpdates() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // setup the data to store
-    long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5, ts6 = 6;
-    byte[] value4 = Bytes.toBytes("val4"), value5 = Bytes.toBytes("val5"), value6 =
-        Bytes.toBytes("val6");
-    // values for the other column to index
-    byte[] v1_1 = ArrayUtils.addAll(value1, Bytes.toBytes("_otherCol")), v3_1 =
-        ArrayUtils.addAll(value3, Bytes.toBytes("_otherCol")), v5_1 =
-        ArrayUtils.addAll(value5, Bytes.toBytes("_otherCol")), v6_1 =
-        ArrayUtils.addAll(value6, Bytes.toBytes("_otherCol"));
-
-    // make some puts to the primary table
-    Put p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    p.add(FAM2, indexed_qualifer, ts1, v1_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts3, value3);
-    p.add(FAM2, indexed_qualifer, ts3, v3_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts5, value5);
-    p.add(FAM2, indexed_qualifer, ts5, v5_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts6, value6);
-    p.add(FAM2, indexed_qualifer, ts6, v6_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    /*
-     * now we have definitely exceeded the number of versions visible to a usual client of the
-     * primary table, so we should try doing a put 'back in time' and make sure that it has the
-     * correct index values and cleanup
-     */
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts2, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // do a raw scan of everything in the table
-    if (LOG.isDebugEnabled()) {
-      Scan s = new Scan();
-      s.setRaw(true);
-      ResultScanner scanner = index1.getScanner(s);
-      for (Result r : scanner) {
-        LOG.debug("Found row:" + r);
-      }
-      scanner.close();
-    }
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-
-    // check the value1 should be present at the earliest timestamp
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1, value2);
-
-    // and value1 should be removed at ts2 (even though it came later)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts1,
-      ts2 + 1, value1, value2); // timestamp + 1 since its exclusive end timestamp
-
-    // late added column should be there just fine at its timestamp
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value2);
-
-    // and check that the late entry also removes itself at the next timestamp up
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts3,
-      value2, value3);
-
-    // -----------------------------------------------
-    // Check Delete intermingled
-    // -----------------------------------------------
-
-    // verify that the old row is there
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v3_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    // scan from the start key forward (should only include [value3][v3_1])
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, expected.get(0).getKey(),
-      value4);
-
-    // then do a delete of just one of the indexed columns. This should insert a delete for just
-    // that single value, then a put and a later corresponding entry in the past for the new value
-    Delete d = new Delete(row1);
-    d.deleteColumn(FAM2, indexed_qualifer, ts3);
-    primary.delete(d);
-
-    // we shouldn't find that entry, but we should find [value3][v1_1] since that is the next entry
-    // back in time from the current one
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    // it should be re-written at 3
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);
-
-    // but we shouldn't find it at ts5 since it should be covered again
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts5,
-      value3, value4);
-
-    // now remove all the older columns in FAM2 at 4
-    d = new Delete(row1);
-    d.deleteColumns(FAM2, indexed_qualifer, ts4);
-    primary.delete(d);
-
-    // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
-    // all the entries for that column
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts4, value3, value4);
-
-    // same as above, but now do it at 3 (one earlier)
-    d = new Delete(row1);
-    d.deleteColumns(FAM2, indexed_qualifer, ts3);
-    primary.delete(d);
-
-    // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
-    // all the entries for that column
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);
-
-    // -----------------------------------------------
-    // then we should have the rest of the inserts at their appropriate timestamps. Everything else
-    // should be exactly the same, except we shouldn't see ts0 anymore at ts2
-    // -----------------------------------------------
-
-    // check the entry at 5
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value5, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v5_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts5, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts5, value5);
-
-    // check the entry at 6
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value6, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v6_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts6, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts6, value5);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Create the primary table (to which you should write), set up properly for indexing the given
-   * {@link ColumnGroup}s. Also creates the necessary index tables to match the passed groups.
-   * @param groups {@link ColumnGroup}s to index, creating one index table per column group.
-   * @return reference to the primary table
-   * @throws IOException if there is an issue communicating with HBase
-   */
-  private HTable createSetupTables(ColumnGroup... groups) throws IOException {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    // setup the index
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    for (ColumnGroup group : groups) {
-      builder.addIndexGroup(group);
-      // create the index tables
-      CoveredColumnIndexer.createIndexTable(admin, group.getTable());
-    }
-
-    // setup the primary table
-    String indexedTableName = Bytes.toString(TestTable.getTableName());
-    HTableDescriptor pTable = new HTableDescriptor(indexedTableName);
-    pTable.addFamily(new HColumnDescriptor(FAM));
-    pTable.addFamily(new HColumnDescriptor(FAM2));
-    builder.build(pTable);
-
-    // create the primary table
-    admin.createTable(pTable);
-    HTable primary = new HTable(UTIL.getConfiguration(), indexedTableName);
-    primary.setAutoFlush(false);
-    return primary;
-  }
-
-  private void closeAndCleanupTables(HTable... tables) throws IOException {
-    if (tables == null) {
-      return;
-    }
-
-    for (HTable table : tables) {
-      table.close();
-      UTIL.deleteTable(table.getTableName());
-    }
-  }
-}
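For context on the createSetupTables helper above, here is a minimal caller-side sketch of the covered-column setup flow it documents: define a ColumnGroup, create one index table per group, attach the index specifier to the primary table, then write a timestamped Put that gets indexed. The table and qualifier names (example_index_table, example_primary, "qual") are made up for illustration and this class is not part of the patch; it only uses the builder calls already shown in this file.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexer;

public class CoveredIndexSetupSketch {
  // Mirrors createSetupTables(...) above: one index table per ColumnGroup, then an indexed write.
  public static void run(HBaseTestingUtility util, byte[] fam) throws IOException {
    HBaseAdmin admin = util.getHBaseAdmin();
    ColumnGroup group = new ColumnGroup("example_index_table");        // hypothetical index table name
    group.add(new CoveredColumn(fam, CoveredColumn.ALL_QUALIFIERS));   // cover every qualifier in fam
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    builder.addIndexGroup(group);
    CoveredColumnIndexer.createIndexTable(admin, group.getTable());    // create the index table

    HTableDescriptor desc = new HTableDescriptor("example_primary");   // hypothetical primary table name
    desc.addFamily(new HColumnDescriptor(fam));
    builder.build(desc);                                               // attach the indexing machinery
    admin.createTable(desc);

    HTable primary = new HTable(util.getConfiguration(), "example_primary");
    Put p = new Put(Bytes.toBytes("row"));
    p.add(fam, Bytes.toBytes("qual"), 10L, Bytes.toBytes("value"));    // timestamped write that gets indexed
    primary.put(p);
    primary.flushCommits();
    primary.close();
  }
}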

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
deleted file mode 100644
index 7cd495d..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
-import org.apache.hadoop.hbase.regionserver.wal.WALEditCodec;
-import org.junit.BeforeClass;
-
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.Indexer;
-
-/**
- * Test secondary indexing from an end-to-end perspective (client to server to index table).
- */
-public class TestEndtoEndIndexingWithCompression extends TestEndToEndCoveredIndexing {
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    //add our codec and enable WAL compression
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    // disable version checking, so we can test against whatever version of HBase happens to be
-    // installed (right now, it's generally going to be SNAPSHOT versions).
-    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    conf.set(WALEditCodec.WAL_EDIT_CODEC_CLASS_KEY,
-    IndexedWALEditCodec.class.getName());
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    
-    //start the mini-cluster
-    UTIL.startMiniCluster();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestFailWithoutRetries.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestFailWithoutRetries.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestFailWithoutRetries.java
deleted file mode 100644
index 8d9d65e..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestFailWithoutRetries.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.covered.IndexUpdate;
-import org.apache.phoenix.hbase.index.covered.TableState;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
-import org.apache.phoenix.index.BaseIndexCodec;
-
-/**
- * If {@link DoNotRetryIOException} is not subclassed correctly (with the {@link String}
- * constructor), {@link MultiResponse#readFields(java.io.DataInput)} will not correctly deserialize
- * the exception, and will just return <tt>null</tt> to the client, which then simply retries.
- */
-public class TestFailWithoutRetries {
-
-  private static final Log LOG = LogFactory.getLog(TestFailWithoutRetries.class);
-  @Rule
-  public TableName table = new TableName();
-
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private String getIndexTableName() {
-    return Bytes.toString(table.getTableName()) + "_index";
-  }
-
-  public static class FailingTestCodec extends BaseIndexCodec {
-
-    @Override
-    public Iterable<IndexUpdate> getIndexDeletes(TableState state) throws IOException {
-      throw new RuntimeException("Intentionally failing deletes for "
-          + TestFailWithoutRetries.class.getName());
-    }
-
-    @Override
-    public Iterable<IndexUpdate> getIndexUpserts(TableState state) throws IOException {
-      throw new RuntimeException("Intentionally failing upserts for "
-          + TestFailWithoutRetries.class.getName());
-    }
-
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    // setup and verify the config
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    IndexManagementUtil.ensureMutableIndexingCorrectlyConfigured(conf);
-    // start the cluster
-    UTIL.startMiniCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  /**
-   * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't
-   * rethrowing the exception correctly?
-   * <p>
-   * We use a custom codec to enforce the thrown exception.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testQuickFailure() throws Exception {
-    // incorrectly set up indexing for the primary table - the target index table doesn't exist, which
-    // should quickly return an error to the client
-    byte[] family = Bytes.toBytes("family");
-    ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
-    // values are [col1]
-    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    // add the index family
-    builder.addIndexGroup(fam1);
-    // usually, we would create the index table here, but we don't for the sake of the test.
-
-    // setup the primary table
-    String primaryTable = Bytes.toString(table.getTableName());
-    HTableDescriptor pTable = new HTableDescriptor(primaryTable);
-    pTable.addFamily(new HColumnDescriptor(family));
-    // override the codec so we can use our test one
-    builder.build(pTable, FailingTestCodec.class);
-
-    // create the primary table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    admin.createTable(pTable);
-    Configuration conf = new Configuration(UTIL.getConfiguration());
-    // up the number of retries/wait time to make it obvious that we are failing with retries here
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
-    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
-    HTable primary = new HTable(conf, primaryTable);
-    primary.setAutoFlush(false, true);
-
-    // do a simple put that should be indexed
-    Put p = new Put(Bytes.toBytes("row"));
-    p.add(family, null, Bytes.toBytes("value"));
-    primary.put(p);
-    try {
-      primary.flushCommits();
-      fail("Shouldn't have gotten a successful write to the primary table");
-    } catch (RetriesExhaustedWithDetailsException e) {
-      LOG.info("Correclty got a failure of the put!");
-    }
-    primary.close();
-  }
-}
\ No newline at end of file
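A side note on the class Javadoc above: the fail-fast behaviour it describes relies on server-side exceptions extending DoNotRetryIOException and keeping the String constructor, so the client can rebuild the exception instead of receiving null and retrying. A minimal sketch of such a subclass follows; the class name is made up and it is illustrative only, not part of the patch.

package org.apache.phoenix.hbase.index.covered.example;

import org.apache.hadoop.hbase.DoNotRetryIOException;

// Hypothetical example: keeping the String constructor lets the client-side code
// reconstruct the exception, so the write fails immediately instead of being retried.
public class ExampleNonRetriableIndexException extends DoNotRetryIOException {
  public ExampleNonRetriableIndexException(String message) {
    super(message);
  }
}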

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 758a9ef..4282af0 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -63,6 +63,19 @@
       <type>test-jar</type>
     </dependency>
   </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-failsafe-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
   
   <profiles>
     <!-- Profile for building against Hadoop 1. Active by default. Not used if another 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/8d6e2a58/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
----------------------------------------------------------------------
diff --git a/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java b/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
new file mode 100644
index 0000000..d81cce3
--- /dev/null
+++ b/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.flume;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.flume.Channel;
+import org.apache.flume.Context;
+import org.apache.flume.Sink;
+import org.apache.flume.SinkFactory;
+import org.apache.flume.channel.MemoryChannel;
+import org.apache.flume.conf.Configurables;
+import org.apache.flume.lifecycle.LifecycleState;
+import org.apache.flume.sink.DefaultSinkFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.phoenix.flume.serializer.EventSerializers;
+import org.apache.phoenix.flume.sink.PhoenixSink;
+import org.apache.phoenix.util.TestUtil;
+
+public class PhoenixSinkIT extends BaseHBaseManagedTimeIT {
+
+    private Context sinkContext;
+    private PhoenixSink sink;
+   
+   
+    @Test
+    public void testSinkCreation() {
+        SinkFactory factory = new DefaultSinkFactory ();
+        Sink sink = factory.create("PhoenixSink__", "org.apache.phoenix.flume.sink.PhoenixSink");
+        Assert.assertNotNull(sink);
+        Assert.assertTrue(PhoenixSink.class.isInstance(sink));
+    }
+    @Test
+    public void testConfiguration () {
+        
+        sinkContext = new Context ();
+        sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
+        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
+
+        sink = new PhoenixSink();
+        Configurables.configure(sink, sinkContext);
+    }
+    
+    
+    
+    @Test(expected= NullPointerException.class)
+    public void testInvalidConfiguration () {
+        
+        sinkContext = new Context ();
+        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
+
+        sink = new PhoenixSink();
+        Configurables.configure(sink, sinkContext);
+    }
+    
+    @Test(expected=IllegalArgumentException.class)
+    public void testInvalidConfigurationOfSerializer () {
+        
+        sinkContext = new Context ();
+        sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
+        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,"csv");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
+
+        sink = new PhoenixSink();
+        Configurables.configure(sink, sinkContext);
+    }
+    
+    @Test
+    public void testInvalidTable() {
+        sinkContext = new Context ();
+        sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
+        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
+
+        sink = new PhoenixSink();
+        Configurables.configure(sink, sinkContext);
+      
+        final Channel channel = this.initChannel();
+        sink.setChannel(channel);
+        try {
+            sink.start();
+            fail();
+        }catch(Exception e) {
+            assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
+        }
+    }
+    
+    @Test
+    public void testSinkLifecycle () {
+        
+        String ddl = "CREATE TABLE flume_test " +
+                "  (flume_time timestamp not null, col1 varchar , col2 varchar" +
+                "  CONSTRAINT pk PRIMARY KEY (flume_time))\n";
+        
+        sinkContext = new Context ();
+        sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
+        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
+        sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION,"^([^\t]+)\t([^\t]+)$");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
+
+        
+        sink = new PhoenixSink();
+        Configurables.configure(sink, sinkContext);
+        Assert.assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
+      
+        final Channel channel = this.initChannel();
+        sink.setChannel(channel);
+        
+        sink.start();
+        Assert.assertEquals(LifecycleState.START, sink.getLifecycleState());
+        sink.stop();
+        Assert.assertEquals(LifecycleState.STOP, sink.getLifecycleState());
+    }
+    
+    @Test
+    public void testCreateTable () throws Exception {
+        
+        String ddl = "CREATE TABLE flume_test " +
+                "  (flume_time timestamp not null, col1 varchar , col2 varchar" +
+                "  CONSTRAINT pk PRIMARY KEY (flume_time))\n";
+
+        final String fullTableName = "FLUME_TEST";
+        sinkContext = new Context ();
+        sinkContext.put(FlumeConstants.CONFIG_TABLE, fullTableName);
+        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
+        sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION,"^([^\t]+)\t([^\t]+)$");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
+        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
+
+        
+        sink = new PhoenixSink();
+        Configurables.configure(sink, sinkContext);
+        Assert.assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
+      
+        final Channel channel = this.initChannel();
+        sink.setChannel(channel);
+        
+        sink.start();
+        HBaseAdmin admin = driver.getConnectionQueryServices(TestUtil.PHOENIX_JDBC_URL, TestUtil.TEST_PROPERTIES).getAdmin();
+        try {
+            boolean exists = admin.tableExists(fullTableName);
+            Assert.assertTrue(exists);
+        }finally {
+            admin.close();
+        }
+    }
+    
+    private Channel initChannel() {
+        //Channel configuration
+        Context channelContext = new Context();
+        channelContext.put("capacity", "10000");
+        channelContext.put("transactionCapacity", "200");
+
+        Channel channel = new MemoryChannel();
+        channel.setName("memorychannel");
+        Configurables.configure(channel, channelContext);
+        return channel;
+    }
+    
+    
+}
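For readers unfamiliar with the Flume side of these tests: once the sink is started, events staged on the channel are drained by Sink.process(). A minimal sketch follows of pushing one tab-separated event that matches the regex serializer configured above; the helper class name is made up and the snippet is illustrative only, not part of the patch.

package org.apache.phoenix.flume;

import org.apache.flume.Channel;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Sink;
import org.apache.flume.Transaction;
import org.apache.flume.event.EventBuilder;

class SinkDriveSketch {
    // Stage one event on the channel and let the (already started) sink drain it.
    static void driveOneEvent(Channel channel, Sink sink) throws EventDeliveryException {
        Transaction tx = channel.getTransaction();
        tx.begin();
        Event event = EventBuilder.withBody("val1\tval2".getBytes());  // matches ^([^\t]+)\t([^\t]+)$
        channel.put(event);
        tx.commit();
        tx.close();

        // The sink takes the event from the channel and writes the parsed columns through Phoenix.
        sink.process();
    }
}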

