phoenix-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jeffr...@apache.org
Subject [14/50] git commit: ReEnable replication setting & RowKeyValueAccessorTest fix
Date Mon, 10 Mar 2014 06:20:31 GMT
ReEnable replication setting & RowKeyValueAccessorTest fix


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/b2516159
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/b2516159
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/b2516159

Branch: refs/heads/master
Commit: b2516159087a4f66d3bb29d1807261a5b8588029
Parents: 420811d
Author: Jeffrey Zhong <jzhong@JZhongs-MacBook-Pro.local>
Authored: Tue Feb 11 14:36:57 2014 -0800
Committer: Jeffrey Zhong <jzhong@JZhongs-MacBook-Pro.local>
Committed: Tue Feb 11 14:36:57 2014 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/index/wal/IndexedKeyValue.java | 20 +++++----
 .../query/ConnectionQueryServicesImpl.java      | 12 ++----
 .../org/apache/phoenix/util/ConfigUtil.java     | 44 ++++++++++++++++++++
 .../TestFailForUnsupportedHBaseVersions.java    | 12 +++---
 .../TestEndToEndCoveredColumnsIndexBuilder.java | 17 ++++----
 .../example/TestEndToEndCoveredIndexing.java    | 12 +++---
 .../TestEndtoEndIndexingWithCompression.java    | 10 ++---
 .../covered/example/TestFailWithoutRetries.java | 18 ++++----
 ...ALReplayWithIndexWritesAndCompressedWAL.java |  5 ++-
 ...exWritesAndUncompressedWALInHBase_094_9.java |  6 +--
 .../phoenix/schema/RowKeyValueAccessorTest.java |  9 ++--
 11 files changed, 106 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/wal/IndexedKeyValue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/wal/IndexedKeyValue.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/wal/IndexedKeyValue.java
index 5b2c6b4..6b30735 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/wal/IndexedKeyValue.java
@@ -1,9 +1,7 @@
 package org.apache.hadoop.hbase.index.wal;
 
-import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -11,15 +9,15 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-
 public class IndexedKeyValue extends KeyValue {
+    public static final byte [] COLUMN_FAMILY = Bytes.toBytes("INDEXEDKEYVALUE_FAKED_FAMILY");
+  
     private static int calcHashCode(ImmutableBytesPtr indexTableName, Mutation mutation)
{
         final int prime = 31;
         int result = 1;
@@ -50,9 +48,17 @@ public class IndexedKeyValue extends KeyValue {
         return mutation;
     }
 
+    /*
+     * Returns a faked column family for an IndexedKeyValue instance
+     */
+    @Override
+    public byte [] getFamily() {
+      return COLUMN_FAMILY;
+    }
+    
     /**
-     * This is a KeyValue that shouldn't actually be replayed, so we always mark it as an
{@link WALEdit#METAFAMILY} so it
-     * isn't replayed via the normal replay mechanism
+     * This is a KeyValue that shouldn't actually be replayed/replicated, so we always mark
it as 
+     * an {@link WALEdit#METAFAMILY} so it isn't replayed/replicated via the normal replay
mechanism
      */
     @Override
     public boolean matchingFamily(final byte[] family) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index cfc4960..71a287d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -109,6 +109,7 @@ import org.apache.phoenix.schema.Sequence;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.ConfigUtil;
 import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -164,14 +165,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices
implement
         // Without making a copy of the configuration we cons up, we lose some of our properties
         // on the server side during testing.
         this.config = HBaseConfiguration.create(config);
-         // set default value for hbase.master.logcleaner.plugin if not set yet
-         if(this.config.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS) == null){
-               this.config.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
-                   "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
-         }
-         // disable replication for Phoenix component
-         this.config.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
-         this.props = new ReadOnlyProps(this.config.iterator());
+        // set replication required parameter
+        ConfigUtil.setReplicationConfigIfAbsent(this.config);
+        this.props = new ReadOnlyProps(this.config.iterator());
         // TODO: should we track connection wide memory usage or just org-wide usage?
         // If connection-wide, create a MemoryManager here, otherwise just use the one from
the delegate
         this.childServices = new ConcurrentHashMap<ImmutableBytesWritable,ConnectionQueryServices>(INITIAL_CHILD_SERVICES_CAPACITY);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/main/java/org/apache/phoenix/util/ConfigUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ConfigUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ConfigUtil.java
new file mode 100644
index 0000000..7cf73dc
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ConfigUtil.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+
+public class ConfigUtil {
+  /**
+   * This function sets missing replication configuration settings. It should only be used
+   * in a testing environment.
+   * @param conf
+   */
+  public static void setReplicationConfigIfAbsent(Configuration conf) {
+    // set replication required parameter
+    String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
+    if (plugins == null) {
+      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, "");
+    }
+    String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM);
+    if (ensemble == null) {
+      conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
index 9bff7fc..d4846d0 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
@@ -32,15 +32,15 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.index.covered.example.ColumnGroup;
+import org.apache.hadoop.hbase.index.covered.example.CoveredColumn;
+import org.apache.hadoop.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.util.ConfigUtil;
 import org.junit.Test;
 
-import org.apache.hadoop.hbase.index.covered.example.ColumnGroup;
-import org.apache.hadoop.hbase.index.covered.example.CoveredColumn;
-import org.apache.hadoop.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-
 /**
  * Test that we correctly fail for versions of HBase that don't support current properties
  */
@@ -116,8 +116,8 @@ public class TestFailForUnsupportedHBaseVersions {
 
     // start the minicluster
     HBaseTestingUtility util = new HBaseTestingUtility(conf);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
     util.startMiniCluster();
 
     // setup the primary table

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
index 99ea0c7..a3046a1 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
@@ -36,28 +36,27 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.index.IndexTestingUtils;
+import org.apache.hadoop.hbase.index.Indexer;
+import org.apache.hadoop.hbase.index.TableName;
+import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
+import org.apache.hadoop.hbase.index.scanner.Scanner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.ConfigUtil;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.TableName;
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.scanner.Scanner;
-
 /**
  * End-to-End test of just the {@link CoveredColumnsIndexBuilder}, but with a simple
  * {@link IndexCodec} and BatchCache implementation.
@@ -102,8 +101,8 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
     // disable version checking, so we can test against whatever version of HBase happens
to be
     // installed (right now, its generally going to be SNAPSHOT versions).
     conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
     UTIL.startMiniCluster();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
index 69aff9d..6923016 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
@@ -41,18 +41,18 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.index.IndexTestingUtils;
+import org.apache.hadoop.hbase.index.Indexer;
+import org.apache.hadoop.hbase.index.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.util.ConfigUtil;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.TableName;
-
 /**
  * Test Covered Column indexing in an 'end-to-end' manner on a minicluster. This covers cases
where
  * we manage custom timestamped updates that arrive in and out of order as well as just using
the
@@ -106,8 +106,8 @@ public class TestEndToEndCoveredIndexing {
     // disable version checking, so we can test against whatever version of HBase happens
to be
     // installed (right now, its generally going to be SNAPSHOT versions).
     conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
     UTIL.startMiniCluster();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
index f357276..bdc9020 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.hbase.index.covered.example;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.index.IndexTestingUtils;
+import org.apache.hadoop.hbase.index.Indexer;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
+import org.apache.phoenix.util.ConfigUtil;
 import org.junit.BeforeClass;
 
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.Indexer;
-
 /**
  * Test secondary indexing from an end-to-end perspective (client to server to index table).
  */
@@ -44,8 +44,8 @@ public class TestEndtoEndIndexingWithCompression extends TestEndToEndCoveredInde
     conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY,
     IndexedWALEditCodec.class.getName());
     conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
     //start the mini-cluster
     UTIL.startMiniCluster();
   }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
index e4810ff..60ff1c8 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -34,18 +35,19 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
 import org.apache.hadoop.hbase.index.IndexTestingUtils;
+import org.apache.hadoop.hbase.index.Indexer;
 import org.apache.hadoop.hbase.index.TableName;
 import org.apache.hadoop.hbase.index.covered.IndexUpdate;
 import org.apache.hadoop.hbase.index.covered.TableState;
 import org.apache.hadoop.hbase.index.util.IndexManagementUtil;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.index.BaseIndexCodec;
+import org.apache.phoenix.util.ConfigUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
 
 /**
  * If {@link DoNotRetryIOException} is not subclassed correctly (with the {@link String}
@@ -86,8 +88,8 @@ public class TestFailWithoutRetries {
     Configuration conf = UTIL.getConfiguration();
     IndexTestingUtils.setupConfig(conf);
     IndexManagementUtil.ensureMutableIndexingCorrectlyConfigured(conf);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
     // start the cluster
     UTIL.startMiniCluster();
   }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
index 003f2b1..4c8b01d 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndCompressedWAL.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.phoenix.util.ConfigUtil;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -91,8 +92,8 @@ public class TestWALReplayWithIndexWritesAndCompressedWAL {
 
     // enable WAL compression
     conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
   }
 
   protected final void setDefaults(Configuration conf) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
index fe23ce3..a92eaf7 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9.java
@@ -2,8 +2,8 @@ package org.apache.hadoop.hbase.regionserver.wal;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-
 import org.apache.hadoop.hbase.index.util.IndexManagementUtil;
+import org.apache.phoenix.util.ConfigUtil;
 
 /**
  * Do the WAL Replay test but with the WALEditCodec, rather than an {@link IndexedHLogReader},
but
@@ -20,7 +20,7 @@ public class TestWALReplayWithIndexWritesAndUncompressedWALInHBase_094_9
extends
 
     // disable WAL compression
     conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, false);
-    // disable replication
-    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
+    // set replication required parameter
+    ConfigUtil.setReplicationConfigIfAbsent(conf);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/b2516159/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
index 4a93b73..7d7c4d6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
@@ -11,12 +11,11 @@ import java.util.List;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Test;
-
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
 
 public class RowKeyValueAccessorTest  extends BaseConnectionlessQueryTest  {
 
@@ -51,9 +50,9 @@ public class RowKeyValueAccessorTest  extends BaseConnectionlessQueryTest
 {
         
         List<PColumn> pkColumns = table.getPKColumns();
         RowKeyValueAccessor accessor = new RowKeyValueAccessor(pkColumns, 3);
-        int offset = accessor.getOffset(keyValue.getBuffer(), keyValue.getRowOffset());
-        int length = accessor.getLength(keyValue.getBuffer(), offset, keyValue.getOffset()+keyValue.getLength());
-        ImmutableBytesWritable ptr = new ImmutableBytesWritable(keyValue.getBuffer(), offset,
length);
+        int offset = accessor.getOffset(keyValue.getRowArray(), keyValue.getRowOffset());
+        int length = accessor.getLength(keyValue.getRowArray(), offset, keyValue.getOffset()+keyValue.getLength());
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable(keyValue.getRowArray(), offset,
length);
         
         PDataType dataType = pkColumns.get(index).getDataType();
         Object expectedObject = dataType.toObject(values[index], PDataType.fromLiteral(values[index]));


Mime
View raw message