phoenix-commits mailing list archives

From jamestay...@apache.org
Subject [21/41] PHOENIX-130 Separate execution of slow (integration) tests from fast unit tests (GabrielReid)
Date Wed, 12 Mar 2014 22:40:02 GMT
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexer.java
index f80cf41..9280b96 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexer.java
@@ -98,6 +98,7 @@ public class CoveredColumnIndexer extends CoveredColumnsIndexBuilder {
    * @param indexTable name of the index table.
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   public static void createIndexTable(HBaseAdmin admin, String indexTable) throws IOException {
     createIndexTable(admin, new HTableDescriptor(indexTable));
   }

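The suppression added above covers the HTableDescriptor(String) constructor, which the HBase releases this code builds against deprecate in favor of TableName-based construction. A minimal sketch of the non-deprecated form, assuming the 0.96-era client API (the descriptor setup is simplified relative to the real createIndexTable overload, and the class name is only illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    class IndexTableSketch {
        // Build the descriptor through TableName instead of the deprecated String constructor.
        static void createIndexTable(HBaseAdmin admin, String indexTable) throws IOException {
            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(indexTable));
            admin.createTable(desc);
        }
    }
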
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
index bf6017e..6f7493f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
@@ -17,9 +17,6 @@
  */
 package org.apache.phoenix.hbase.index.covered.filter;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -33,7 +30,6 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.filter.FilterBase;
-
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 /**
@@ -181,6 +177,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
    */
   class DeleteFamilyHinter implements Hinter {
 
+    @SuppressWarnings("deprecation")
     @Override
     public KeyValue getHint(KeyValue peeked) {
       // check to see if we have another column to seek
@@ -205,6 +202,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
    */
   class DeleteColumnHinter implements Hinter {
 
+    @SuppressWarnings("deprecation")
     @Override
     public KeyValue getHint(KeyValue kv) {
       return KeyValue.createLastOnRow(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),

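Both getHint implementations are annotated presumably because KeyValue.getBuffer() is deprecated in the targeted HBase line; the Cell interface exposes the same bytes through per-component array/offset/length accessors. A small illustrative sketch, assuming the 0.96-era Cell API (not part of this patch; the helper name is hypothetical):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.Cell;

    class CellRowSketch {
        // Copy the row key out of a Cell without reaching into the deprecated backing buffer.
        static byte[] rowOf(Cell cell) {
            return Arrays.copyOfRange(cell.getRowArray(),
                cell.getRowOffset(), cell.getRowOffset() + cell.getRowLength());
        }
    }
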
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
index 4a3a540..2ff0714 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
@@ -17,10 +17,6 @@
  */
 package org.apache.phoenix.hbase.index.covered.filter;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.filter.FilterBase;
@@ -52,6 +48,7 @@ public class MaxTimestampFilter extends FilterBase {
     }
     int offset =kv.getTimestampOffset();
     //set the timestamp in the buffer
+    @SuppressWarnings("deprecation")
     byte[] buffer = kv.getBuffer();
     byte[] ts = Bytes.toBytes(this.ts);
     System.arraycopy(ts, 0, buffer, offset, ts.length);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
index ae0421d..4052d68 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.hbase.index.covered.update;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 /**
@@ -80,6 +79,7 @@ public class ColumnReference implements Comparable<ColumnReference> {
         return this.qualifierPtr;
     }
 
+  @SuppressWarnings("deprecation")
   public boolean matches(KeyValue kv) {
     if (matchesFamily(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength())) {
       return matchesQualifier(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength());

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index bbd0bc7..dcfd9ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
 
 /**
@@ -75,6 +74,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
         return seekToNextUnfilteredKeyValue();
     }
 
+    @SuppressWarnings("deprecation")
     private boolean seekToNextUnfilteredKeyValue() throws IOException {
         while (true) {
             KeyValue peeked = delegate.peek();

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 2199b4f..4cbf6dd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -29,14 +29,14 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
-
-import com.google.common.collect.Maps;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.builder.IndexBuildingFailureException;
 import org.apache.phoenix.hbase.index.covered.data.LazyValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.scanner.Scanner;
 
+import com.google.common.collect.Maps;
+
 /**
  * Utility class to help manage indexes
  */
@@ -100,6 +100,7 @@ public class IndexManagementUtil {
 
     }
 
+    @SuppressWarnings("deprecation")
     public static ValueGetter createGetterFromKeyValues(Collection<KeyValue> pendingUpdates) {
         final Map<ReferencingColumn, ImmutableBytesPtr> valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates
                 .size());
@@ -168,6 +169,7 @@ public class IndexManagementUtil {
      * codec to determine if a given update should even be indexed. This assumes that for any index, there are going to
      * small number of columns, versus the number of kvs in any one batch.
      */
+    @SuppressWarnings("deprecation")
     public static boolean updateMatchesColumns(Collection<KeyValue> update, List<ColumnReference> columns) {
         // check to see if the kvs in the new update even match any of the columns requested
         // assuming that for any index, there are going to small number of columns, versus the number of
@@ -193,6 +195,7 @@ public class IndexManagementUtil {
      * This employs the same logic as {@link #updateMatchesColumns(Collection, List)}, but is flips the iteration logic
      * to search columns before kvs.
      */
+    @SuppressWarnings("deprecation")
     public static boolean columnMatchesUpdate(List<ColumnReference> columns, Collection<KeyValue> update) {
         boolean matches = false;
         outer: for (ColumnReference ref : columns) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
index 682a504..2ae89fb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
index 83426c1..1a225c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
@@ -252,6 +252,7 @@ public class MappedByteBufferSortedQueue extends AbstractQueue<ResultEntry> {
             return size;
         }
 
+        @SuppressWarnings("deprecation")
         public boolean writeResult(ResultEntry entry) throws IOException {
             if (flushBuffer)
                 throw new IOException("Results already flushed");

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
index adbb5a2..8ff96f8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
@@ -17,18 +17,25 @@
  */
 package org.apache.phoenix.join;
 
-import java.io.*;
+import java.io.ByteArrayInputStream;
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.IOException;
 import java.sql.SQLException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 
 import net.jcip.annotations.Immutable;
 
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
-import org.xerial.snappy.Snappy;
-
 import org.apache.phoenix.cache.HashCache;
 import org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory;
 import org.apache.phoenix.expression.Expression;
@@ -37,7 +44,11 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.*;
+import org.apache.phoenix.util.ResultUtil;
+import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.SizedUtil;
+import org.apache.phoenix.util.TupleUtil;
+import org.xerial.snappy.Snappy;
 
 public class HashCacheFactory implements ServerCacheFactory {
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/CSVBulkLoader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/CSVBulkLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/CSVBulkLoader.java
index 80a6922..f7d7527 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/CSVBulkLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/CSVBulkLoader.java
@@ -47,14 +47,15 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-
-import com.google.common.io.Closeables;
 import org.apache.phoenix.map.reduce.util.ConfigReader;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 
+import com.google.common.io.Closeables;
+
+@SuppressWarnings("deprecation")
 public class CSVBulkLoader {
 	private static final String UNDERSCORE = "_";
 	
@@ -106,7 +107,6 @@ public class CSVBulkLoader {
 	 * -help	Print all options (Optional)
 	 */
 
-	@SuppressWarnings("deprecation")
     	public static void main(String[] args) throws Exception{
 		
 		String inputFile = null;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/MapReduceJob.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/MapReduceJob.java b/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/MapReduceJob.java
index 9dc8032..21ab21a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/MapReduceJob.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/map/reduce/MapReduceJob.java
@@ -128,7 +128,8 @@ public class MapReduceJob {
 		*  Finally, do connection.rollback( to preserve table state).
 		*/
 		
-		@Override
+		@SuppressWarnings("deprecation")
+        @Override
 		public void map(LongWritable key, Text line, Context context) throws IOException, InterruptedException{
 			
 			CSVParser parser = new CSVParser(new InputStreamReader(new ByteArrayInputStream(line.toString().getBytes())),CSVFormat.EXCEL);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 27f5313..77c655e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -346,6 +346,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         return childQueryService;
     }
 
+    @SuppressWarnings("deprecation")
     @Override
     public void clearTableRegionCache(byte[] tableName) throws SQLException {
         connection.clearRegionCache(TableName.valueOf(tableName));
@@ -367,7 +368,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 List<HRegionLocation> locations = Lists.newArrayList();
                 byte[] currentKey = HConstants.EMPTY_START_ROW;
                 do {
-                  HRegionLocation regionLocation = connection.getRegionLocation(
+                  @SuppressWarnings("deprecation")
+                HRegionLocation regionLocation = connection.getRegionLocation(
                     TableName.valueOf(tableName), currentKey, reload);
                   locations.add(regionLocation);
                   currentKey = regionLocation.getRegionInfo().getEndKey();
@@ -901,6 +903,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
      * Invoke meta data coprocessor with one retry if the key was found to not be in the regions
      * (due to a table split)
      */
+    @SuppressWarnings("deprecation")
     private MetaDataMutationResult metaDataCoprocessorExec(byte[] tableKey,
             Batch.Call<MetaDataService, MetaDataResponse> callable) throws SQLException {
         try {
@@ -1616,6 +1619,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         incrementSequenceValues(sequenceKeys, timestamp, values, exceptions, 1, Sequence.Action.RESERVE);
     }
 
+    @SuppressWarnings("deprecation")
     private void incrementSequenceValues(List<SequenceKey> keys, long timestamp, long[] values, SQLException[] exceptions, int factor, Sequence.Action action) throws SQLException {
         List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
         for (SequenceKey key : keys) {
@@ -1688,6 +1692,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         }
     }
 
+    @SuppressWarnings("deprecation")
     @Override
     public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
         List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
@@ -1761,6 +1766,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     // Take no locks, as this only gets run when there are no open connections
     // so there's no danger of contention.
+    @SuppressWarnings("deprecation")
     private void returnAllSequences(ConcurrentMap<SequenceKey,Sequence> sequenceMap) throws SQLException {
         List<Append> mutations = Lists.newArrayListWithExpectedSize(sequenceMap.size());
         for (Sequence sequence : sequenceMap.values()) {
@@ -2136,6 +2142,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
      * @throws IOException
      * @throws SQLException
      */
+    @SuppressWarnings("deprecation")
     private WhiteList upgradeCoprocessorsTo3_0(HBaseAdmin admin, boolean forceUpgrade) throws IOException, SQLException {
         String files = config.get(QueryServices.AUTO_UPGRADE_WHITELIST_ATTRIB);
         WhiteList coprocUpgradeWhiteList = new WhiteList(files);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
index 447267c..da62da3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
@@ -46,6 +46,7 @@ public interface HTableFactory {
      * Default implementation.  Uses standard HBase HTables.
      */
     static class HTableFactoryImpl implements HTableFactory {
+        @SuppressWarnings("deprecation")
         @Override
         public HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool) throws IOException {
             return new HTable(tableName, connection, pool);

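The warning here comes from the HTable(byte[], HConnection, ExecutorService) constructor, which later HBase client APIs deprecate. This patch itself only suppresses the warning; the sketch below shows a possible non-deprecated form, assuming HConnection.getTable(TableName, ExecutorService) is available in the targeted release (the class name is only illustrative):

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HTableInterface;

    class TableFromConnectionSketch {
        // Ask the connection for a table handle rather than constructing HTable directly.
        static HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool)
                throws IOException {
            return connection.getTable(TableName.valueOf(tableName), pool);
        }
    }
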
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
index b3550cf..2356b2a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
@@ -17,10 +17,6 @@
  */
 package org.apache.phoenix.schema;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.phoenix.util.SizedUtil;
 
 public class DelegateColumn extends DelegateDatum implements PColumn {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 8619359..32c979a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1238,12 +1238,10 @@ public class MetaDataClient {
             Long scn = connection.getSCN();
             long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
             List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
-            @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
             Delete tableDelete = new Delete(key, clientTimeStamp);
             tableMetaData.add(tableDelete);
             if (parentTableName != null) {
                 byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
-                @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
                 Delete linkDelete = new Delete(linkKey, clientTimeStamp);
                 tableMetaData.add(linkDelete);
             }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadWriteKeyValuesWithCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadWriteKeyValuesWithCodec.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadWriteKeyValuesWithCodec.java
deleted file mode 100644
index 182b5c4..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadWriteKeyValuesWithCodec.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.io.util.LRUDictionary;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
-
-/**
- * Simple test to read/write simple files via our custom {@link WALEditCodec} to ensure properly
- * encoding/decoding without going through a cluster.
- */
-public class TestReadWriteKeyValuesWithCodec {
-
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final byte[] ROW = Bytes.toBytes("row");
-  private static final byte[] FAMILY = Bytes.toBytes("family");
-
-  @BeforeClass
-  public static void setupCodec() {
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName());
-  }
-
-  @Test
-  public void testWithoutCompression() throws Exception {
-    // get the FS ready to read/write the edits
-    Path testDir = UTIL.getDataTestDir("TestReadWriteCustomEdits_withoutCompression");
-    Path testFile = new Path(testDir, "testfile");
-    FileSystem fs = UTIL.getTestFileSystem();
-
-    List<WALEdit> edits = getEdits();
-    writeReadAndVerify(null, fs, edits, testFile);
-  }
-
-  @Test
-  public void testWithCompression() throws Exception {
-    // get the FS ready to read/write the edit
-    Path testDir = UTIL.getDataTestDir("TestReadWriteCustomEdits_withCompression");
-    Path testFile = new Path(testDir, "testfile");
-    FileSystem fs = UTIL.getTestFileSystem();
-
-    List<WALEdit> edits = getEdits();
-    CompressionContext compression = new CompressionContext(LRUDictionary.class, false, false);
-    writeReadAndVerify(compression, fs, edits, testFile);
-  }
-
-  /**
-   * @return a bunch of {@link WALEdit}s that test a range of serialization possibilities.
-   */
-  private List<WALEdit> getEdits() {
-    // Build up a couple of edits
-    List<WALEdit> edits = new ArrayList<WALEdit>();
-    Put p = new Put(ROW);
-    p.add(FAMILY, null, Bytes.toBytes("v1"));
-
-    WALEdit withPut = new WALEdit();
-    addMutation(withPut, p, FAMILY);
-    edits.add(withPut);
-
-    Delete d = new Delete(ROW);
-    d.deleteColumn(FAMILY, null);
-    WALEdit withDelete = new WALEdit();
-    addMutation(withDelete, d, FAMILY);
-    edits.add(withDelete);
-    
-    WALEdit withPutsAndDeletes = new WALEdit();
-    addMutation(withPutsAndDeletes, d, FAMILY);
-    addMutation(withPutsAndDeletes, p, FAMILY);
-    edits.add(withPutsAndDeletes);
-    
-    WALEdit justIndexUpdates = new WALEdit();
-    byte[] table = Bytes.toBytes("targetTable");
-    IndexedKeyValue ikv = new IndexedKeyValue(table, p);
-    justIndexUpdates.add(ikv);
-    edits.add(justIndexUpdates);
-
-    WALEdit mixed = new WALEdit();
-    addMutation(mixed, d, FAMILY);
-    mixed.add(ikv);
-    addMutation(mixed, p, FAMILY);
-    edits.add(mixed);
-
-    return edits;
-  }
-
-  /**
-   * Add all the {@link KeyValue}s in the {@link Mutation}, for the pass family, to the given
-   * {@link WALEdit}.
-   */
-  private void addMutation(WALEdit edit, Mutation m, byte[] family) {
-    List<Cell> kvs = m.getFamilyCellMap().get(FAMILY);
-    for (Cell kv : kvs) {
-      edit.add(KeyValueUtil.ensureKeyValue(kv));
-    }
-  }
-
-  
-  private void writeWALEdit(WALCellCodec codec, List<KeyValue> kvs, FSDataOutputStream out) throws IOException {
-    out.writeInt(kvs.size());
-    Codec.Encoder cellEncoder = codec.getEncoder(out);
-    // We interleave the two lists for code simplicity
-    for (KeyValue kv : kvs) {
-        cellEncoder.write(kv);
-    }
-  }
-  
-  /**
-   * Write the edits to the specified path on the {@link FileSystem} using the given codec and then
-   * read them back in and ensure that we read the same thing we wrote.
-   */
-  private void writeReadAndVerify(final CompressionContext compressionContext, FileSystem fs, List<WALEdit> edits,
-      Path testFile) throws IOException {
-	  
-	WALCellCodec codec = WALCellCodec.create(UTIL.getConfiguration(), compressionContext);  
-    // write the edits out
-    FSDataOutputStream out = fs.create(testFile);
-    for (WALEdit edit : edits) {
-      writeWALEdit(codec, edit.getKeyValues(), out);
-    }
-    out.close();
-
-    // read in the edits
-    FSDataInputStream in = fs.open(testFile);
-    List<WALEdit> read = new ArrayList<WALEdit>();
-    for (int i = 0; i < edits.size(); i++) {
-      WALEdit edit = new WALEdit();
-      int numEdits = in.readInt();
-      edit.readFromCells(codec.getDecoder(in), numEdits);
-      read.add(edit);
-    }
-    in.close();
-
-    // make sure the read edits match the written
-    for(int i=0; i< edits.size(); i++){
-      WALEdit expected = edits.get(i);
-      WALEdit found = read.get(i);
-      for(int j=0; j< expected.getKeyValues().size(); j++){
-        KeyValue fkv = found.getKeyValues().get(j);
-        KeyValue ekv = expected.getKeyValues().get(j);
-        assertEquals("KV mismatch for edit! Expected: "+expected+", but found: "+found, ekv, fkv);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
deleted file mode 100644
index 2c8be76..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.phoenix.util.ConfigUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexer;
-
-/**
- * For pre-0.94.9 instances, this class tests correctly deserializing WALEdits w/o compression. Post
- * 0.94.9 we can support a custom {@link WALEditCodec}, which handles reading/writing the compressed
- * edits.
- * <p>
- * Most of the underlying work (creating/splitting the WAL, etc) is from
- * org.apache.hadoop.hhbase.regionserver.wal.TestWALReplay, copied here for completeness and ease of
- * use.
- * <p>
- * This test should only have a single test - otherwise we will start/stop the minicluster multiple
- * times, which is probably not what you want to do (mostly because its so much effort).
- */
-public class TestWALReplayWithIndexWritesAndCompressedWAL {
-
-  public static final Log LOG = LogFactory.getLog(TestWALReplay.class);
-  @Rule
-  public TableName table = new TableName();
-  private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
-
-  final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private Path hbaseRootDir = null;
-  private Path oldLogDir;
-  private Path logDir;
-  private FileSystem fs;
-  private Configuration conf;
-
-  @Before
-  public void setUp() throws Exception {
-    setupCluster();
-    this.conf = HBaseConfiguration.create(UTIL.getConfiguration());
-    this.fs = UTIL.getDFSCluster().getFileSystem();
-    this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
-    this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    this.logDir = new Path(this.hbaseRootDir, HConstants.HREGION_LOGDIR_NAME);
-    // reset the log reader to ensure we pull the one from this config
-    HLogFactory.resetLogReaderClass();
-  }
-
-  private void setupCluster() throws Exception {
-    configureCluster();
-    startCluster();
-  }
-
-  protected void configureCluster() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    setDefaults(conf);
-
-    // enable WAL compression
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    // set replication required parameter
-    ConfigUtil.setReplicationConfigIfAbsent(conf);
-  }
-
-  protected final void setDefaults(Configuration conf) {
-    // make sure writers fail quickly
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
-    conf.setInt(HConstants.HBASE_CLIENT_PAUSE, 1000);
-    conf.setInt("zookeeper.recovery.retry", 3);
-    conf.setInt("zookeeper.recovery.retry.intervalmill", 100);
-    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 30000);
-    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 5000);
-    // enable appends
-    conf.setBoolean("dfs.support.append", true);
-    IndexTestingUtils.setupConfig(conf);
-  }
-
-  protected void startCluster() throws Exception {
-    UTIL.startMiniDFSCluster(3);
-    UTIL.startMiniZKCluster();
-    UTIL.startMiniHBaseCluster(1, 1);
-
-    Path hbaseRootDir = UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    LOG.info("hbase.rootdir=" + hbaseRootDir);
-    UTIL.getConfiguration().set(HConstants.HBASE_DIR, hbaseRootDir.toString());
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    UTIL.shutdownMiniHBaseCluster();
-    UTIL.shutdownMiniDFSCluster();
-    UTIL.shutdownMiniZKCluster();
-  }
-
-
-  private void deleteDir(final Path p) throws IOException {
-    if (this.fs.exists(p)) {
-      if (!this.fs.delete(p, true)) {
-        throw new IOException("Failed remove of " + p);
-      }
-    }
-  }
-
-  /**
-   * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
-   * seqids.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testReplayEditsWrittenViaHRegion() throws Exception {
-    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
-    final HRegionInfo hri = new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr), 
-        null, null, false);
-    final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
-    deleteDir(basedir);
-    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
-    
-    //setup basic indexing for the table
-    // enable indexing to a non-existant index table
-    byte[] family = new byte[] { 'a' };
-    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
-    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    builder.addIndexGroup(fam1);
-    builder.build(htd);
-
-    // create the region + its WAL
-    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
-    region0.close();
-    region0.getLog().closeAndDelete();
-    HLog wal = createWAL(this.conf);
-    RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
-    // mock out some of the internals of the RSS, so we can run CPs
-    Mockito.when(mockRS.getWAL(null)).thenReturn(wal);
-    RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
-    Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
-    ServerName mockServerName = Mockito.mock(ServerName.class);
-    Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
-    Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
-    HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
-    region.initialize();
-    region.getSequenceId().set(0);
-
-    //make an attempted write to the primary that should also be indexed
-    byte[] rowkey = Bytes.toBytes("indexed_row_key");
-    Put p = new Put(rowkey);
-    p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
-    region.put(p);
-
-    // we should then see the server go down
-    Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
-      Mockito.any(Exception.class));
-    region.close(true);
-    wal.close();
-
-    // then create the index table so we are successful on WAL replay
-    CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
-
-    // run the WAL split and setup the region
-    runWALSplit(this.conf);
-    HLog wal2 = createWAL(this.conf);
-    HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);
-
-    // initialize the region - this should replay the WALEdits from the WAL
-    region1.initialize();
-
-    // now check to ensure that we wrote to the index table
-    HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
-    int indexSize = getKeyValueCount(index);
-    assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
-    Get g = new Get(rowkey);
-    final Result result = region1.get(g);
-    assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());
-
-    // cleanup the index table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    admin.disableTable(INDEX_TABLE_NAME);
-    admin.deleteTable(INDEX_TABLE_NAME);
-    admin.close();
-  }
-
-  /**
-   * Create simple HTD with three families: 'a', 'b', and 'c'
-   * @param tableName name of the table descriptor
-   * @return
-   */
-  private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
-    htd.addFamily(a);
-    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
-    htd.addFamily(b);
-    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
-    htd.addFamily(c);
-    return htd;
-  }
-
-  /*
-   * @param c
-   * @return WAL with retries set down from 5 to 1 only.
-   * @throws IOException
-   */
-  private HLog createWAL(final Configuration c) throws IOException {
-    HLog wal = HLogFactory.createHLog(FileSystem.get(c), logDir, "localhost,1234", c);
-    // Set down maximum recovery so we dfsclient doesn't linger retrying something
-    // long gone.
-    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
-    return wal;
-  }
-
-  /*
-   * Run the split. Verify only single split file made.
-   * @param c
-   * @return The single split file made
-   * @throws IOException
-   */
-  private Path runWALSplit(final Configuration c) throws IOException {
-    FileSystem fs = FileSystem.get(c);
-    
-    List<Path> splits = HLogSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
-        this.oldLogDir, fs, c);
-    // Split should generate only 1 file since there's only 1 region
-    assertEquals("splits=" + splits, 1, splits.size());
-    // Make sure the file exists
-    assertTrue(fs.exists(splits.get(0)));
-    LOG.info("Split file=" + splits.get(0));
-    return splits.get(0);
-  }
-
-  private int getKeyValueCount(HTable table) throws IOException {
-    Scan scan = new Scan();
-    scan.setMaxVersions(Integer.MAX_VALUE - 1);
-
-    ResultScanner results = table.getScanner(scan);
-    int count = 0;
-    for (Result res : results) {
-      count += res.list().size();
-      System.out.println(count + ") " + res);
-    }
-    results.close();
-
-    return count;
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
deleted file mode 100644
index cc7a111..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-
-import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
-import org.apache.phoenix.util.ConfigUtil;
-
-/**
- * Do the WAL Replay test but with the WALEditCodec, rather than an {@link IndexedHLogReader}, but
- * still with compression
- */
-public class TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9 extends TestWALReplayWithIndexWritesAndCompressedWAL {
-
-  @Override
-  protected void configureCluster() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    setDefaults(conf);
-    LOG.info("Setting HLog impl to indexed log reader");
-    conf.set(IndexManagementUtil.HLOG_READER_IMPL_KEY, IndexedHLogReader.class.getName());
-
-    // disable WAL compression
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, false);
-    // set replication required parameter
-    ConfigUtil.setReplicationConfigIfAbsent(conf);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/df23cf97/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
deleted file mode 100644
index 6b19d42..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/AlterTableTest.java
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.apache.phoenix.util.TestUtil.closeConnection;
-import static org.apache.phoenix.util.TestUtil.closeStatement;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.math.BigDecimal;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Properties;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.Test;
-
-
-
-public class AlterTableTest extends BaseHBaseManagedTimeTest {
-    public static final String SCHEMA_NAME = "";
-    public static final String DATA_TABLE_NAME = "T";
-    public static final String INDEX_TABLE_NAME = "I";
-    public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
-    public static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
-
-
-    @Test
-    public void testAlterTableWithVarBinaryKey() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        
-        try {
-            String ddl = "CREATE TABLE test_table " +
-                    "  (a_string varchar not null, a_binary varbinary not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n";
-            createTestTable(getUrl(), ddl);
-            
-            ddl = "ALTER TABLE test_table ADD b_string VARCHAR NULL PRIMARY KEY";
-            PreparedStatement stmt = conn.prepareStatement(ddl);
-            stmt.execute();
-            fail("Should have caught bad alter.");
-        } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.VARBINARY_LAST_PK.getErrorCode(), e.getErrorCode());
-        } finally {
-            conn.close();
-        }
-    }
-
-    @Test
-    public void testAddVarCharColToPK() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        
-        try {
-            String ddl = "CREATE TABLE test_table " +
-                    "  (a_string varchar not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
-            conn.createStatement().execute(ddl);
-            
-            String dml = "UPSERT INTO test_table VALUES(?)";
-            PreparedStatement stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "b");
-            stmt.execute();
-            stmt.setString(1, "a");
-            stmt.execute();
-            conn.commit();
-            
-            String query = "SELECT * FROM test_table";
-            ResultSet rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("a",rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("b",rs.getString(1));
-            assertFalse(rs.next());
-            
-            ddl = "ALTER TABLE test_table ADD  b_string VARCHAR  NULL PRIMARY KEY  ";
-            conn.createStatement().execute(ddl);
-            
-            query = "SELECT * FROM test_table WHERE a_string = 'a' AND b_string IS NULL";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("a",rs.getString(1));
-            assertFalse(rs.next());
-            
-            dml = "UPSERT INTO test_table VALUES(?)";
-            stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "c");
-            stmt.execute();
-            conn.commit();
-           
-            query = "SELECT * FROM test_table WHERE a_string = 'c' AND b_string IS NULL";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("c",rs.getString(1));
-            assertFalse(rs.next());
-            
-            dml = "UPSERT INTO test_table(a_string,col1) VALUES(?,?)";
-            stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "a");
-            stmt.setInt(2, 5);
-            stmt.execute();
-            conn.commit();
-           
-            query = "SELECT a_string,col1 FROM test_table WHERE a_string = 'a' AND b_string IS NULL";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("a",rs.getString(1));
-            assertEquals(5,rs.getInt(2)); // TODO: figure out why this flaps
-            assertFalse(rs.next());
-            
-        } finally {
-            conn.close();
-        }
-    }
-    
-
-    
-    @Test
-    public void testAlterColumnFamilyProperty() throws Exception {
-
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        
-        String ddl = "CREATE TABLE test_table " +
-                "  (a_string varchar not null, col1 integer" +
-                "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
-        try {
-                conn.createStatement().execute(ddl);
-              
-                conn.createStatement().execute("ALTER TABLE TEST_TABLE ADD col2 integer IN_MEMORY=true");
-                
-                HTableInterface htable1 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("TEST_TABLE")); 
-                HTableDescriptor htableDesciptor1 = htable1.getTableDescriptor();
-                HColumnDescriptor hcolumnDescriptor1 = htableDesciptor1.getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
-                assertNotNull(hcolumnDescriptor1);
-               
-                try {
-                    
-                    conn.createStatement().execute("ALTER TABLE TEST_TABLE SET IN_MEMORY=false");
-                    fail("Should have caught exception.");
-                    
-                } catch (SQLException e) {
-                    assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1025 (42Y84): Unsupported property set in ALTER TABLE command."));
-                } 
-        }finally {
-            conn.close();
-        }
-     }
-  
-    private static void assertIndexExists(Connection conn, boolean exists) throws SQLException {
-        ResultSet rs = conn.getMetaData().getIndexInfo(null, SCHEMA_NAME, DATA_TABLE_NAME, false, false);
-        assertEquals(exists, rs.next());
-    }
-    
-    @Test
-    public void testDropIndexedColumn() throws Exception {
-        String query;
-        ResultSet rs;
-        PreparedStatement stmt;
-    
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-    
-        // make sure that the tables are empty, but reachable
-        conn.createStatement().execute(
-          "CREATE TABLE " + DATA_TABLE_FULL_NAME
-              + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-    
-        conn.createStatement().execute(
-          "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1, v2)");
-        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-    
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "x");
-        stmt.setString(3, "1");
-        stmt.execute();
-        conn.commit();
-        
-        assertIndexExists(conn,true);
-        conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " DROP COLUMN v1");
-        assertIndexExists(conn,false);
-        
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("1",rs.getString(2));
-        assertFalse(rs.next());
-        
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "2");
-        stmt.execute();
-        conn.commit();
-        
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("2",rs.getString(2));
-        assertFalse(rs.next());
-    }
-    
-    @Test
-    public void testDropCoveredColumn() throws Exception {
-        String query;
-        ResultSet rs;
-        PreparedStatement stmt;
-    
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-    
-        // make sure that the tables are empty, but reachable
-        conn.createStatement().execute(
-          "CREATE TABLE " + DATA_TABLE_FULL_NAME
-              + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR)");
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-    
-        conn.createStatement().execute(
-          "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) include (v2, v3)");
-        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-    
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "x");
-        stmt.setString(3, "1");
-        stmt.setString(4, "j");
-        stmt.execute();
-        conn.commit();
-        
-        assertIndexExists(conn,true);
-        conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " DROP COLUMN v2");
-        // TODO: verify meta data that we get back to confirm our column was dropped
-        assertIndexExists(conn,true);
-        
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertEquals("j",rs.getString(3));
-        assertFalse(rs.next());
-        
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "y");
-        stmt.setString(3, "k");
-        stmt.execute();
-        conn.commit();
-        
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertEquals("k",rs.getString(3));
-        assertFalse(rs.next());
-    }
-    
-    @Test
-    public void testAddPKColumnToTableWithIndex() throws Exception {
-        String query;
-        ResultSet rs;
-        PreparedStatement stmt;
-    
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-    
-        // make sure that the tables are empty, but reachable
-        conn.createStatement().execute(
-          "CREATE TABLE " + DATA_TABLE_FULL_NAME
-              + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-    
-        conn.createStatement().execute(
-          "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) include (v2)");
-        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-    
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "a");
-        stmt.setString(2, "x");
-        stmt.setString(3, "1");
-        stmt.execute();
-        conn.commit();
-        
-        assertIndexExists(conn,true);
-        conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " ADD v3 VARCHAR, k2 DECIMAL PRIMARY KEY");
-        rs = conn.getMetaData().getPrimaryKeys("", SCHEMA_NAME, DATA_TABLE_NAME);
-        assertTrue(rs.next());
-        assertEquals("K",rs.getString("COLUMN_NAME"));
-        assertEquals(1, rs.getShort("KEY_SEQ"));
-        assertTrue(rs.next());
-        assertEquals("K2",rs.getString("COLUMN_NAME"));
-        assertEquals(2, rs.getShort("KEY_SEQ"));
-
-        rs = conn.getMetaData().getPrimaryKeys("", SCHEMA_NAME, INDEX_TABLE_NAME);
-        assertTrue(rs.next());
-        assertEquals(QueryConstants.DEFAULT_COLUMN_FAMILY + IndexUtil.INDEX_COLUMN_NAME_SEP + "V1",rs.getString("COLUMN_NAME"));
-        assertEquals(1, rs.getShort("KEY_SEQ"));
-        assertTrue(rs.next());
-        assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K",rs.getString("COLUMN_NAME"));
-        assertEquals(2, rs.getShort("KEY_SEQ"));
-        assertTrue(rs.next());
-        assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K2",rs.getString("COLUMN_NAME"));
-        assertEquals(3, rs.getShort("KEY_SEQ"));
-        
-        assertIndexExists(conn,true);
-        
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertEquals("1",rs.getString(3));
-        assertNull(rs.getBigDecimal(4));
-        assertFalse(rs.next());
-        
-        // load some data into the table
-        stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + "(K,K2,V1,V2) VALUES(?,?,?,?)");
-        stmt.setString(1, "b");
-        stmt.setBigDecimal(2, BigDecimal.valueOf(2));
-        stmt.setString(3, "y");
-        stmt.setString(4, "2");
-        stmt.execute();
-        conn.commit();
-        
-        query = "SELECT k,k2 FROM " + DATA_TABLE_FULL_NAME + " WHERE v1='y'";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals(BigDecimal.valueOf(2),rs.getBigDecimal(2));
-        assertFalse(rs.next());
-    }
-    
-    @Test
-    public void testSetSaltedTableAsImmutable() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        
-        try {
-            String ddl = "CREATE TABLE MESSAGES (\n" + 
-            		"        SENDER_ID UNSIGNED_LONG NOT NULL,\n" + 
-            		"        RECIPIENT_ID UNSIGNED_LONG NOT NULL,\n" + 
-            		"        M_TIMESTAMP DATE  NOT NULL,\n" + 
-            		"        ROW_ID UNSIGNED_LONG NOT NULL,\n" + 
-            		"        IS_READ TINYINT,\n" + 
-            		"        IS_DELETED TINYINT,\n" + 
-            		"        VISIBILITY TINYINT,\n" + 
-            		"        B.SENDER_IP VARCHAR,\n" + 
-            		"        B.JSON VARCHAR,\n" + 
-            		"        B.M_TEXT VARCHAR\n" + 
-            		"        CONSTRAINT ROWKEY PRIMARY KEY\n" + 
-            		"(SENDER_ID,RECIPIENT_ID,M_TIMESTAMP DESC,ROW_ID))\n" + 
-            		"SALT_BUCKETS=4";
-            conn.createStatement().execute(ddl);
-            
-            ddl = "ALTER TABLE MESSAGES SET IMMUTABLE_ROWS=true";
-            conn.createStatement().execute(ddl);
-            
-            conn.createStatement().executeQuery("select count(*) from messages").next();
-            
-        } finally {
-            conn.close();
-        }
-    }
-    
-    
-    @Test
-    public void testDropColumnFromSaltedTable() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        
-        try {
-            String ddl = "CREATE TABLE MESSAGES (\n" + 
-                    "        SENDER_ID UNSIGNED_LONG NOT NULL,\n" + 
-                    "        RECIPIENT_ID UNSIGNED_LONG NOT NULL,\n" + 
-                    "        M_TIMESTAMP DATE  NOT NULL,\n" + 
-                    "        ROW_ID UNSIGNED_LONG NOT NULL,\n" + 
-                    "        IS_READ TINYINT,\n" + 
-                    "        IS_DELETED TINYINT,\n" + 
-                    "        VISIBILITY TINYINT,\n" + 
-                    "        B.SENDER_IP VARCHAR,\n" + 
-                    "        B.JSON VARCHAR,\n" + 
-                    "        B.M_TEXT VARCHAR\n" + 
-                    "        CONSTRAINT ROWKEY PRIMARY KEY\n" + 
-                    "(SENDER_ID,RECIPIENT_ID,M_TIMESTAMP DESC,ROW_ID))\n" + 
-                    "SALT_BUCKETS=4";
-            conn.createStatement().execute(ddl);
-            
-            ddl = "ALTER TABLE MESSAGES DROP COLUMN B.JSON";
-            conn.createStatement().execute(ddl);
-            
-            conn.createStatement().executeQuery("select count(*) from messages").next();
-        } finally {
-            conn.close();
-        }
-
-    }
-    
-    
-    @Test
-    public void testAddVarCols() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        
-        try {
-            String ddl = "CREATE TABLE test_table " +
-                    "  (a_string varchar not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
-            conn.createStatement().execute(ddl);
-            
-            String dml = "UPSERT INTO test_table VALUES(?)";
-            PreparedStatement stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "b");
-            stmt.execute();
-            stmt.setString(1, "a");
-            stmt.execute();
-            conn.commit();
-            
-            String query = "SELECT * FROM test_table";
-            ResultSet rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("a",rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("b",rs.getString(1));
-            assertFalse(rs.next());
-            
-            
-            query = "SELECT * FROM test_table WHERE a_string = 'a' ";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("a",rs.getString(1));
-          
-            ddl = "ALTER TABLE test_table ADD  c1.col2 VARCHAR  , c1.col3 integer , c2.col4 integer";
-            conn.createStatement().execute(ddl);
-            
-            ddl = "ALTER TABLE test_table ADD   col5 integer , c1.col2 VARCHAR";
-            try {
-                conn.createStatement().execute(ddl);
-                fail();
-            } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.COLUMN_EXIST_IN_DEF.getErrorCode(), e.getErrorCode());
-            }
-            
-            query = "SELECT col5 FROM test_table";
-            try {
-                conn.createStatement().executeQuery(query);
-                fail(); 
-            } catch(SQLException e) {
-                assertTrue(e.getMessage(), e.getMessage().contains("ERROR 504 (42703): Undefined column."));
-            }
-       
-            ddl = "ALTER TABLE test_table ADD IF NOT EXISTS col5 integer , c1.col2 VARCHAR";
-            conn.createStatement().execute(ddl);
-            
-            dml = "UPSERT INTO test_table VALUES(?,?,?,?,?)";
-            stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "c");
-            stmt.setInt(2, 100);
-            stmt.setString(3, "d");
-            stmt.setInt(4, 101);
-            stmt.setInt(5, 102);
-            stmt.execute();
-            conn.commit();
-           
-            query = "SELECT * FROM test_table WHERE a_string = 'c' ";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("c",rs.getString(1));
-            assertEquals(100,rs.getInt(2));
-            assertEquals("d",rs.getString(3));
-            assertEquals(101,rs.getInt(4));
-            assertEquals(102,rs.getInt(5));
-            assertFalse(rs.next());
-            
-            ddl = "ALTER TABLE test_table ADD  col5 integer";
-            conn.createStatement().execute(ddl);
-            
-            query = "SELECT c1.* FROM test_table WHERE a_string = 'c' ";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("d",rs.getString(1));
-            assertEquals(101,rs.getInt(2));
-            assertFalse(rs.next());
-            
-            
-            dml = "UPSERT INTO test_table(a_string,col1,col5) VALUES(?,?,?)";
-            stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "e");
-            stmt.setInt(2, 200);
-            stmt.setInt(3, 201);
-            stmt.execute();
-            conn.commit();
-            
-            
-            query = "SELECT a_string,col1,col5 FROM test_table WHERE a_string = 'e' ";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("e",rs.getString(1));
-            assertEquals(200,rs.getInt(2));
-            assertEquals(201,rs.getInt(3));
-            assertFalse(rs.next());
-            
-          } finally {
-            conn.close();
-        }
-    }
-
-    @Test
-    public void testDropVarCols() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        try {
-            String ddl = "CREATE TABLE test_table " + "  (a_string varchar not null, col1 integer, cf1.col2 integer"
-                    + "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
-            conn.createStatement().execute(ddl);
-
-            ddl = "ALTER TABLE test_table DROP COLUMN col1";
-            conn.createStatement().execute(ddl);
-
-            ddl = "ALTER TABLE test_table DROP COLUMN cf1.col2";
-            conn.createStatement().execute(ddl);
-        } finally {
-            conn.close();
-        }
-    }
-    
-    @Test
-    public void testDisallowAddingNotNullableColumnNotPartOfPkForExistingTable() throws Exception {
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = null;
-        PreparedStatement stmt = null;
-        try {
-            conn = DriverManager.getConnection(getUrl(), props);
-            conn.setAutoCommit(false);
-            try {
-                String ddl = "CREATE TABLE test_table " + "  (a_string varchar not null, col1 integer, cf1.col2 integer"
-                        + "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
-                stmt = conn.prepareStatement(ddl);
-                stmt.execute();
-            } finally {
-                closeStatement(stmt);
-            }
-            try {
-                stmt = conn.prepareStatement("ALTER TABLE test_table ADD b_string VARCHAR NOT NULL");
-                stmt.execute();
-                fail("Should have failed since altering a table by adding a non-nullable column is not allowed.");
-            } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.CANNOT_ADD_NOT_NULLABLE_COLUMN.getErrorCode(), e.getErrorCode());
-            } finally {
-                closeStatement(stmt);
-            }
-        } finally {
-            closeConnection(conn);
-        }
-    }
-
-    private void asssertIsWALDisabled(Connection conn, String fullTableName, boolean expectedValue) throws SQLException {
-        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        assertEquals(expectedValue, pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName)).isWALDisabled());
-    }
-    
-    @Test
-    public void testDisableWAL() throws Exception {
-        String fullTableName = "TEST_TABLE";
-        String fullIndexName = "I";
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-
-        try {
-            conn.createStatement()
-                    .execute(
-                            "CREATE TABLE test_table "
-                                    + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
-                                    + "  CONSTRAINT pk PRIMARY KEY (a_string)) immutable_rows=true, disable_wal=true ");
-
-            Connection conn2 = DriverManager.getConnection(getUrl(), props);
-            String query = "SELECT * FROM test_table";
-            ResultSet rs = conn2.createStatement().executeQuery(query);
-            assertFalse(rs.next());
-            asssertIsWALDisabled(conn2,fullTableName, true);
-            conn2.close();
-            asssertIsWALDisabled(conn,fullTableName, true);
-
-            conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
-            conn2 = DriverManager.getConnection(getUrl(), props);
-            query = "SELECT * FROM i";
-            rs = conn2.createStatement().executeQuery(query);
-            asssertIsWALDisabled(conn2,fullIndexName, true);
-            assertFalse(rs.next());
-            conn2.close();
-            asssertIsWALDisabled(conn,fullIndexName, true);
-            
-            conn.createStatement().execute("DROP TABLE test_table");
-        } finally {
-            conn.close();
-        }
-        conn = DriverManager.getConnection(getUrl(), props);
-
-        try {
-            conn.createStatement()
-                    .execute(
-                            "CREATE TABLE test_table "
-                                    + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
-                                    + "  CONSTRAINT pk PRIMARY KEY (a_string)) immutable_rows=true");
-
-            Connection conn2 = DriverManager.getConnection(getUrl(), props);
-            String query = "SELECT * FROM test_table";
-            ResultSet rs = conn2.createStatement().executeQuery(query);
-            assertFalse(rs.next());
-            asssertIsWALDisabled(conn,fullTableName, false);
-            conn2.close();
-            asssertIsWALDisabled(conn,fullTableName, false);
-
-            conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
-            conn2 = DriverManager.getConnection(getUrl(), props);
-            query = "SELECT * FROM i";
-            rs = conn2.createStatement().executeQuery(query);
-            asssertIsWALDisabled(conn2,fullIndexName, true);
-            assertFalse(rs.next());
-            conn2.close();
-            asssertIsWALDisabled(conn,fullIndexName, true);
-            conn.createStatement().execute("DROP TABLE test_table");
-        } finally {
-            conn.close();
-        }
-        conn = DriverManager.getConnection(getUrl(), props);
-
-        try {
-            conn.createStatement()
-                    .execute(
-                            "CREATE TABLE test_table "
-                                    + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
-                                    + "  CONSTRAINT pk PRIMARY KEY (a_string))");
-
-            Connection conn2 = DriverManager.getConnection(getUrl(), props);
-            String query = "SELECT * FROM test_table";
-            ResultSet rs = conn2.createStatement().executeQuery(query);
-            assertFalse(rs.next());
-            asssertIsWALDisabled(conn2,fullTableName, false);
-            conn2.close();
-            asssertIsWALDisabled(conn,fullTableName, false);
-
-            conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
-            conn2 = DriverManager.getConnection(getUrl(), props);
-            query = "SELECT * FROM i";
-            rs = conn2.createStatement().executeQuery(query);
-            asssertIsWALDisabled(conn2,fullIndexName, false);
-            assertFalse(rs.next());
-            conn2.close();
-            asssertIsWALDisabled(conn,fullIndexName, false);
-            
-        } finally {
-            conn.close();
-        }
-    }
-
-    @Test
-    public void testDropColumnsWithImutability() throws Exception {
-
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-
-        try {
-            conn.createStatement()
-                    .execute(
-                            "CREATE TABLE test_table "
-                                    + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
-                                    + "  CONSTRAINT pk PRIMARY KEY (a_string)) immutable_rows=true , SALT_BUCKETS=3 ");
-
-            String query = "SELECT * FROM test_table";
-            ResultSet rs = conn.createStatement().executeQuery(query);
-            assertFalse(rs.next());
-
-            conn.createStatement().execute("CREATE INDEX i ON test_table (col1) include (cf1.col2) SALT_BUCKETS=4");
-            query = "SELECT * FROM i";
-            rs = conn.createStatement().executeQuery(query);
-            assertFalse(rs.next());
-
-            String dml = "UPSERT INTO test_table VALUES(?,?,?,?,?)";
-            PreparedStatement stmt = conn.prepareStatement(dml);
-            stmt.setString(1, "b");
-            stmt.setInt(2, 10);
-            stmt.setInt(3, 20);
-            stmt.setInt(4, 30);
-            stmt.setInt(5, 40);
-            stmt.execute();
-            stmt.setString(1, "a");
-            stmt.setInt(2, 101);
-            stmt.setInt(3, 201);
-            stmt.setInt(4, 301);
-            stmt.setInt(5, 401);
-            stmt.execute();
-            conn.commit();
-
-            query = "SELECT * FROM test_table order by col1";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("b", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("a", rs.getString(1));
-            assertFalse(rs.next());
-
-            String ddl = "ALTER TABLE test_table DROP COLUMN IF EXISTS col2,col3";
-            conn.createStatement().execute(ddl);
-            
-            ddl = "ALTER TABLE test_table DROP COLUMN a_string,col1";
-            try{
-                conn.createStatement().execute(ddl);
-                fail();
-            } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.CANNOT_DROP_PK.getErrorCode(), e.getErrorCode());
-            }
-            
-            ddl = "ALTER TABLE test_table DROP COLUMN col4,col5";
-            try {
-                conn.createStatement().execute(ddl);
-                fail();
-            } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
-                assertTrue(e.getMessage(), e.getMessage().contains("ERROR 504 (42703): Undefined column. columnName=COL5"));
-            } 
-
-            ddl = "ALTER TABLE test_table DROP COLUMN IF EXISTS col1";
-            conn.createStatement().execute(ddl);
-            
-            query = "SELECT * FROM i";
-            try {
-                rs = conn.createStatement().executeQuery(query);
-                fail();
-            } catch (TableNotFoundException e) {}
-            
-            query = "select col4 FROM test_table";
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertTrue(rs.next());
-
-            query = "select col2,col3 FROM test_table";
-            try {
-                rs = conn.createStatement().executeQuery(query);
-                fail();
-            } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
-            }
-              
-        } finally {
-            conn.close();
-        }
-    }
-   
- }

