phoenix-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sama...@apache.org
Subject phoenix git commit: PHOENIX-3994 Index RPC priority still depends on the controller factory property in hbase-site.xml
Date Fri, 14 Jul 2017 00:46:47 GMT
Repository: phoenix
Updated Branches:
  refs/heads/4.11-HBase-1.2 ca0415742 -> ca39cbb15


PHOENIX-3994 Index RPC priority still depends on the controller factory property in hbase-site.xml


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ca39cbb1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ca39cbb1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ca39cbb1

Branch: refs/heads/4.11-HBase-1.2
Commit: ca39cbb1565232730fe71f06f6eaed411a431721
Parents: ca04157
Author: Samarth Jain <samarth@apache.org>
Authored: Thu Jul 13 17:46:38 2017 -0700
Committer: Samarth Jain <samarth@apache.org>
Committed: Thu Jul 13 17:46:38 2017 -0700

----------------------------------------------------------------------
 ...ReplayWithIndexWritesAndCompressedWALIT.java |   3 +-
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  |  17 ++-
 .../ipc/controller/IndexRpcController.java      |   6 +
 ...erRegionServerIndexRpcControllerFactory.java |  65 +++++++++++
 ...egionServerMetadataRpcControllerFactory.java |  62 ++++++++++
 .../ipc/controller/MetadataRpcController.java   |   5 +
 .../controller/ServerRpcControllerFactory.java  |  42 ++-----
 .../apache/phoenix/compile/UpsertCompiler.java  |   4 +-
 .../coprocessor/BaseScannerRegionObserver.java  |  12 +-
 .../DelegateRegionCoprocessorEnvironment.java   | 114 +++++++++++++++++++
 .../UngroupedAggregateRegionObserver.java       |  27 ++++-
 .../org/apache/phoenix/hbase/index/Indexer.java |  23 ++--
 .../hbase/index/write/IndexWriterUtils.java     |  84 ++++++++++++--
 .../write/ParallelWriterIndexCommitter.java     |   4 +-
 .../org/apache/phoenix/util/PropertiesUtil.java |  20 ++++
 .../java/org/apache/phoenix/query/BaseTest.java |   4 -
 16 files changed, 408 insertions(+), 84 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 0b48a1a..abd61c0 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -192,7 +193,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
 
     WAL wal = createWAL(this.conf, walFactory);
-    RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
+    HRegionServer mockRS = Mockito.mock(HRegionServer.class);
     // mock out some of the internals of the RSS, so we can run CPs
     when(mockRS.getWAL(null)).thenReturn(wal);
     RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index b9e4fff..d8a9ed4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.never;
 
 import java.sql.Connection;
 import java.sql.PreparedStatement;
@@ -38,7 +39,6 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.ipc.CallRunner;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -67,9 +67,7 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
     public static void doSetup() throws Exception {
     	Map<String, String> serverProps = Collections.singletonMap(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,

         		TestPhoenixIndexRpcSchedulerFactory.class.getName());
-        // use the standard rpc controller for client rpc, so that we can isolate server
rpc and ensure they use the correct queue  
-    	Map<String, String> clientProps = Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
-    			RpcControllerFactory.class.getName());
+        Map<String, String> clientProps = Collections.emptyMap();
         NUM_SLAVES_BASE = 2;
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
     }
@@ -99,9 +97,11 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
             createIndex(conn, indexName);
 
             ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
-    
+            TestPhoenixIndexRpcSchedulerFactory.reset();
             upsertRow(conn, dataTableFullName);
-    
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor())
+                    .dispatch(Mockito.any(CallRunner.class));
+            TestPhoenixIndexRpcSchedulerFactory.reset();
             // run select query that should use the index
             String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
             PreparedStatement stmt = conn.prepareStatement(selectSql);
@@ -124,8 +124,10 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
             // create a data table with the same name as the index table
             createTable(conn, indexTableFullName);
             
+            TestPhoenixIndexRpcSchedulerFactory.reset();
             // upsert one row to the table (which has the same table name as the previous
index table)
             upsertRow(conn, indexTableFullName);
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor(), never()).dispatch(Mockito.any(CallRunner.class));
             
             // run select query on the new table
             selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
@@ -139,9 +141,6 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
             assertEquals("v2", rs.getString(2));
             assertFalse(rs.next());
             
-            // verify that the index queue is used only once (for the first upsert)
-            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
-            
             TestPhoenixIndexRpcSchedulerFactory.reset();
             createIndex(conn, indexName + "_1");
             // verify that the index queue is used once and only once (during Upsert Select on
server to build the index)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
index fdb1d33..86c4490 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
@@ -25,6 +25,12 @@ import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 
+import com.google.protobuf.RpcController;
+
+/**
+ * {@link RpcController} that sets the appropriate priority of RPC calls destined for Phoenix
index
+ * tables.
+ */
 class IndexRpcController extends DelegatingPayloadCarryingRpcController {
 
     private final int priority;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
new file mode 100644
index 0000000..89b49b7
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+/**
+ * {@link RpcControllerFactory} that should only be used when creating {@link HTable} for
+ * making remote RPCs to the region servers hosting global mutable index table regions.
+ * This controller factory shouldn't be globally configured anywhere and is meant to be used
+ * only internally by Phoenix indexing code.
+ */
+public class InterRegionServerIndexRpcControllerFactory extends RpcControllerFactory {
+
+    public InterRegionServerIndexRpcControllerFactory(Configuration conf) {
+        super(conf);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController() {
+        PayloadCarryingRpcController delegate = super.newController();
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
+        PayloadCarryingRpcController delegate = super.newController(cellScanner);
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables)
{
+        PayloadCarryingRpcController delegate = super.newController(cellIterables);
+        return getController(delegate);
+    }
+
+    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate)
{
+        // construct a chain of controllers: metadata, index and standard controller
+        IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
+        return new MetadataRpcController(indexRpcController, conf);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
new file mode 100644
index 0000000..ec4583b
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+/**
+ * {@link RpcControllerFactory} that should only be used when creating {@link HTable} for
+ * making remote RPCs to the region servers hosting Phoenix SYSTEM tables.
+ */
+public class InterRegionServerMetadataRpcControllerFactory extends RpcControllerFactory {
+
+    public InterRegionServerMetadataRpcControllerFactory(Configuration conf) {
+        super(conf);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController() {
+        PayloadCarryingRpcController delegate = super.newController();
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
+        PayloadCarryingRpcController delegate = super.newController(cellScanner);
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables)
{
+        PayloadCarryingRpcController delegate = super.newController(cellIterables);
+        return getController(delegate);
+    }
+
+    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate)
{
+        // construct a chain of controllers: metadata and delegate controller
+        return new MetadataRpcController(delegate, conf);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
index df833f9..e8fab25 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
@@ -28,7 +28,12 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.ImmutableList;
+import com.google.protobuf.RpcController;
 
+/**
+ * {@link RpcController} that sets the appropriate priority of RPC calls destined for Phoenix
SYSTEM
+ * tables
+ */
 class MetadataRpcController extends DelegatingPayloadCarryingRpcController {
 
 	private int priority;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
index 8c17eda..283bb6e 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
@@ -17,46 +17,20 @@
  */
 package org.apache.hadoop.hbase.ipc.controller;
 
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 
 /**
- * {@link RpcControllerFactory} that sets the priority of index and metadata rpc calls
- * so that they are each processed in their own queues
+ * {@link RpcControllerFactory} that sets the priority of metadata rpc calls so that they
get
+ * processed in their own queue. This controller factory should only be configured on the
region
+ * servers. It is meant to control inter region server RPCs destined for Phoenix SYSTEM tables.
+ * <p>
+ * This class is left here only for legacy reasons so as to not break users who have configured
this
+ * {@link RpcControllerFactory} in their hbase-site.xml.
  */
-public class ServerRpcControllerFactory extends RpcControllerFactory {
-
+@Deprecated
+public class ServerRpcControllerFactory extends InterRegionServerMetadataRpcControllerFactory
{
     public ServerRpcControllerFactory(Configuration conf) {
         super(conf);
     }
-
-    @Override
-    public PayloadCarryingRpcController newController() {
-        PayloadCarryingRpcController delegate = super.newController();
-        return getController(delegate);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
-        PayloadCarryingRpcController delegate = super.newController(cellScanner);
-        return getController(delegate);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables)
{
-        PayloadCarryingRpcController delegate = super.newController(cellIterables);
-        return getController(delegate);
-    }
-    
-    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate)
{
-    	// construct a chain of controllers: metadata, index and standard controller
-    	IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
-		return new MetadataRpcController(indexRpcController, conf);
-    }
-    
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index d3bfc2d..b99727b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -532,7 +532,9 @@ public class UpsertCompiler {
                 // If we're in the else, then it's not an aggregate, distinct, limited, or
sequence using query,
                 // so we might be able to run it entirely on the server side.
                 // region space managed by region servers. So we bail out on executing on
server side.
-                runOnServer = (sameTable || serverUpsertSelectEnabled) && isAutoCommit
&& !table.isTransactional()
+                // Disable running upsert select on server side if a table has global mutable
secondary indexes on it
+                boolean hasGlobalMutableIndexes = SchemaUtil.hasGlobalIndex(table) &&
!table.isImmutableRows();
+                runOnServer = (sameTable || (serverUpsertSelectEnabled && !hasGlobalMutableIndexes))
&& isAutoCommit && !table.isTransactional()
                         && !(table.isImmutableRows() && !table.getIndexes().isEmpty())
                         && !select.isJoin() && table.getRowTimestampColPos()
== -1;
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 9dc1e47413..d096a19 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -20,19 +20,14 @@ package org.apache.phoenix.coprocessor;
 import java.io.IOException;
 import java.util.List;
 
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-
 import org.apache.hadoop.hbase.NotServingRegionException;
-
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -44,13 +39,13 @@ import org.apache.htrace.Trace;
 import org.apache.phoenix.execute.TupleProjector;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.iterate.NonAggregateRegionScannerFactory;
+import org.apache.phoenix.iterate.RegionScannerFactory;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
 import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.phoenix.iterate.RegionScannerFactory;
-import org.apache.phoenix.iterate.NonAggregateRegionScannerFactory;
 
 abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
 
@@ -113,15 +108,12 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver
{
 
     /** Exposed for testing */
     public static final String SCANNER_OPENED_TRACE_INFO = "Scanner opened on server";
-    protected Configuration rawConf;
     protected QualifierEncodingScheme encodingScheme;
     protected boolean useNewValueColumnQualifier;
 
     @Override
     public void start(CoprocessorEnvironment e) throws IOException {
         super.start(e);
-        this.rawConf =
-                ((RegionCoprocessorEnvironment) e).getRegionServerServices().getConfiguration();
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
new file mode 100644
index 0000000..380212e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.io.IOException;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+
+/**
+ * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often
we
+ * clone the configuration provided by the HBase coprocessor environment before modifying
it. So
+ * this class comes in handy where we have to return our custom config.
+ */
+public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment
{
+
+    private final Configuration config;
+    private RegionCoprocessorEnvironment delegate;
+
+    public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment
delegate) {
+        this.config = config;
+        this.delegate = delegate;
+    }
+
+    @Override
+    public int getVersion() {
+        return delegate.getVersion();
+    }
+
+    @Override
+    public String getHBaseVersion() {
+        return delegate.getHBaseVersion();
+    }
+
+    @Override
+    public Coprocessor getInstance() {
+        return delegate.getInstance();
+    }
+
+    @Override
+    public int getPriority() {
+        return delegate.getPriority();
+    }
+
+    @Override
+    public int getLoadSequence() {
+        return delegate.getLoadSequence();
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+        return config;
+    }
+
+    @Override
+    public HTableInterface getTable(TableName tableName) throws IOException {
+        return delegate.getTable(tableName);
+    }
+
+    @Override
+    public HTableInterface getTable(TableName tableName, ExecutorService service)
+            throws IOException {
+        return delegate.getTable(tableName, service);
+    }
+
+    @Override
+    public ClassLoader getClassLoader() {
+        return delegate.getClassLoader();
+    }
+
+    @Override
+    public Region getRegion() {
+        return delegate.getRegion();
+    }
+
+    @Override
+    public HRegionInfo getRegionInfo() {
+        return delegate.getRegionInfo();
+    }
+
+    @Override
+    public RegionServerServices getRegionServerServices() {
+        return delegate.getRegionServerServices();
+    }
+
+    @Override
+    public ConcurrentMap<String, Object> getSharedData() {
+        return delegate.getSharedData();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index a108e49..9013f20 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.InterRegionServerIndexRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -114,6 +116,7 @@ import org.apache.phoenix.util.ExpressionUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.StringUtil;
@@ -174,6 +177,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     private boolean isRegionClosing = false;
     private static final Logger logger = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
     private KeyValueBuilder kvBuilder;
+    private Configuration upsertSelectConfig;
 
     @Override
     public void start(CoprocessorEnvironment e) throws IOException {
@@ -181,6 +185,19 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         // Can't use ClientKeyValueBuilder on server-side because the memstore expects to
         // be able to get a single backing buffer for a KeyValue.
         this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
+        /*
+         * We need to create a copy of region's configuration since we don't want any side
effect of
+         * setting the RpcControllerFactory.
+         */
+        upsertSelectConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
+        /*
+         * Till PHOENIX-3995 is fixed, we need to use the
+         * InterRegionServerIndexRpcControllerFactory. Although this would cause remote RPCs
to use
+         * index handlers on the destination region servers, it is better than using the
regular
+         * priority handlers which could result in a deadlock.
+         */
+        upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+            InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class);
     }
 
     private void commitBatch(Region region, List<Mutation> mutations, byte[] indexUUID,
long blockingMemstoreSize,
@@ -392,7 +409,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         if (upsertSelectTable != null) {
             isUpsert = true;
             projectedTable = deserializeTable(upsertSelectTable);
-            targetHTable = new HTable(env.getConfiguration(), projectedTable.getPhysicalName().getBytes());
+            targetHTable = new HTable(upsertSelectConfig, projectedTable.getPhysicalName().getBytes());
             selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
             values = new byte[projectedTable.getPKColumns().size()][];
             areMutationInSameRegion = Bytes.compareTo(targetHTable.getTableName(),
@@ -433,7 +450,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         long maxBatchSizeBytes = 0L;
         MutationList mutations = new MutationList();
         boolean needToWrite = false;
-        Configuration conf = c.getEnvironment().getConfiguration();
+        Configuration conf = env.getConfiguration();
         long flushSize = region.getTableDesc().getMemStoreFlushSize();
 
         if (flushSize <= 0) {
@@ -454,13 +471,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         boolean buildLocalIndex = indexMaintainers != null && dataColumns==null &&
!localIndexScan;
         if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null &&
deleteCF != null) || emptyCF != null || buildLocalIndex) {
             needToWrite = true;
-            maxBatchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
+            maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
             mutations = new MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize /
10));
-            maxBatchSizeBytes = env.getConfiguration().getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
+            maxBatchSizeBytes = conf.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
                 QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
         }
         Aggregators aggregators = ServerAggregators.deserialize(
-                scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
+                scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), conf);
         Aggregator[] rowAggregators = aggregators.getAggregators();
         boolean hasMore;
         boolean hasAny = false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index ab8c434..7d94fdc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.InterRegionServerIndexRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment;
 import org.apache.phoenix.hbase.index.builder.IndexBuildManager;
 import org.apache.phoenix.hbase.index.builder.IndexBuilder;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
@@ -77,6 +78,7 @@ import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
 import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import org.apache.phoenix.trace.TracingUtils;
 import org.apache.phoenix.trace.util.NullSpan;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil;
 
 import com.google.common.collect.Multimap;
@@ -105,10 +107,9 @@ import com.google.common.collect.Multimap;
 public class Indexer extends BaseRegionObserver {
 
   private static final Log LOG = LogFactory.getLog(Indexer.class);
-
+  private RegionCoprocessorEnvironment environment;
   protected IndexWriter writer;
   protected IndexBuildManager builder;
-  private RegionCoprocessorEnvironment environment;
 
   /** Configuration key for the {@link IndexBuilder} to use */
   public static final String INDEX_BUILDER_CONF_KEY = "index.builder";
@@ -151,8 +152,6 @@ public class Indexer extends BaseRegionObserver {
       try {
         final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
         this.environment = env;
-        env.getConfiguration().setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
-                ServerRpcControllerFactory.class, RpcControllerFactory.class);
         String serverName = env.getRegionServerServices().getServerName().getServerName();
         if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
           // make sure the right version <-> combinations are allowed.
@@ -165,9 +164,17 @@ public class Indexer extends BaseRegionObserver {
         }
     
         this.builder = new IndexBuildManager(env);
-
+        // Clone the config since it is shared
+        Configuration clonedConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
+        /*
+         * Set the rpc controller factory so that the HTables used by IndexWriter would
+         * set the correct priorities on the remote RPC calls.
+         */
+        clonedConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+                InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class);
+        DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(clonedConfig,
env);
         // setup the actual index writer
-        this.writer = new IndexWriter(env, serverName + "-index-writer");
+        this.writer = new IndexWriter(indexWriterEnv, serverName + "-index-writer");
         try {
           // get the specified failure policy. We only ever override it in tests, but we
need to do it
           // here
@@ -178,7 +185,7 @@ public class Indexer extends BaseRegionObserver {
               policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits);
           LOG.debug("Setting up recovery writter with failure policy: " + policy.getClass());
           recoveryWriter =
-              new RecoveryIndexWriter(policy, env, serverName + "-recovery-writer");
+              new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer");
         } catch (Exception ex) {
           throw new IOException("Could not instantiate recovery failure policy!", ex);
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 6eb657d..ea4ec1a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -17,12 +17,24 @@
  */
 package org.apache.phoenix.hbase.index.write;
 
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+import javax.annotation.concurrent.GuardedBy;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.client.CoprocessorHConnection;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.phoenix.hbase.index.table.CoprocessorHTableFactory;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 
 public class IndexWriterUtils {
@@ -61,14 +73,66 @@ public class IndexWriterUtils {
     // private ctor for utilites
   }
 
-  public static HTableFactory getDefaultDelegateHTableFactory(CoprocessorEnvironment env)
{
-    // create a simple delegate factory, setup the way we need
-    Configuration conf = env.getConfiguration();
-    // set the number of threads allowed per table.
-    int htableThreads =
-        conf.getInt(IndexWriterUtils.INDEX_WRITER_PER_TABLE_THREADS_CONF_KEY, IndexWriterUtils.DEFAULT_NUM_PER_TABLE_THREADS);
-    LOG.trace("Creating HTableFactory with " + htableThreads + " threads for each HTable.");
-    IndexManagementUtil.setIfNotSet(conf, HTABLE_THREAD_KEY, htableThreads);
-    return new CoprocessorHTableFactory(env);
-  }
+    public static HTableFactory getDefaultDelegateHTableFactory(CoprocessorEnvironment env)
{
+        // create a simple delegate factory, setup the way we need
+        Configuration conf = env.getConfiguration();
+        // set the number of threads allowed per table.
+        int htableThreads =
+                conf.getInt(IndexWriterUtils.INDEX_WRITER_PER_TABLE_THREADS_CONF_KEY,
+                    IndexWriterUtils.DEFAULT_NUM_PER_TABLE_THREADS);
+        LOG.trace("Creating HTableFactory with " + htableThreads + " threads for each HTable.");
+        IndexManagementUtil.setIfNotSet(conf, HTABLE_THREAD_KEY, htableThreads);
+        if (env instanceof RegionCoprocessorEnvironment) {
+            RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
+            RegionServerServices services = e.getRegionServerServices();
+            if (services instanceof HRegionServer) {
+                return new CoprocessorHConnectionTableFactory(conf, (HRegionServer) services);
+            }
+        }
+        return new CoprocessorHTableFactory(env);
+    }
+
+    /**
+     * {@code HTableFactory} that creates HTables by using a {@link CoprocessorHConnection}
This
+     * factory was added as a workaround to the bug reported in
+     * https://issues.apache.org/jira/browse/HBASE-18359
+     */
+    private static class CoprocessorHConnectionTableFactory implements HTableFactory {
+        @GuardedBy("CoprocessorHConnectionTableFactory.this")
+        private HConnection connection;
+        private final Configuration conf;
+        private final HRegionServer server;
+
+        CoprocessorHConnectionTableFactory(Configuration conf, HRegionServer server) {
+            this.conf = conf;
+            this.server = server;
+        }
+
+        private synchronized HConnection getConnection(Configuration conf) throws IOException
{
+            if (connection == null || connection.isClosed()) {
+                connection = new CoprocessorHConnection(conf, server);
+            }
+            return connection;
+        }
+
+        @Override
+        public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
+            return getConnection(conf).getTable(tablename.copyBytesIfNecessary());
+        }
+
+        @Override
+        public void shutdown() {
+            try {
+                getConnection(conf).close();
+            } catch (IOException e) {
+                LOG.error("Exception caught while trying to close the HConnection used by
CoprocessorHConnectionTableFactory");
+            }
+        }
+
+        @Override
+        public HTableInterface getTable(ImmutableBytesPtr tablename, ExecutorService pool)
+                throws IOException {
+            return getConnection(conf).getTable(tablename.copyBytesIfNecessary(), pool);
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 7510c5b..a537010 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -158,8 +158,8 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                                 return null;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow
way
-                                if (LOG.isTraceEnabled()) {
-                                    LOG.trace("indexRegion.batchMutate failed and fall back
to HTable.batch(). Got error="
+                                if (LOG.isDebugEnabled()) {
+                                    LOG.debug("indexRegion.batchMutate failed and fall back
to HTable.batch(). Got error="
                                             + ignord);
                                 }
                             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index bcb9aa4..f59c01b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.util;
 
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import org.apache.hadoop.conf.Configuration;
 
@@ -58,4 +59,23 @@ public class PropertiesUtil {
         }
         return props;
     }
+
+    /**
+     * Utility to work around the limitation of the copy constructor
+     * {@link Configuration#Configuration(Configuration)} provided by the {@link Configuration}
+     * class. See https://issues.apache.org/jira/browse/HBASE-18378.
+     * The copy constructor doesn't copy all the config settings, so we need to resort to
+     * iterating through all the settings and setting it on the cloned config.
+     * @param toCopy  configuration to copy
+     * @return
+     */
+    public static Configuration cloneConfig(Configuration toCopy) {
+        Configuration clone = new Configuration();
+        Iterator<Entry<String, String>> iterator = toCopy.iterator();
+        while (iterator.hasNext()) {
+            Entry<String, String> entry = iterator.next();
+            clone.set(entry.getKey(), entry.getValue());
+        }
+        return clone;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca39cbb1/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 739d627..0fb8a0d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -119,8 +119,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseClientManagedTimeIT;
@@ -416,7 +414,6 @@ public abstract class BaseTest {
     
     private static final String ORG_ID = "00D300000000XHP";
     protected static int NUM_SLAVES_BASE = 1;
-    private static final String DEFAULT_SERVER_RPC_CONTROLLER_FACTORY = ServerRpcControllerFactory.class.getName();
     private static final String DEFAULT_RPC_SCHEDULER_FACTORY = PhoenixRpcSchedulerFactory.class.getName();
     
     protected static String getZKClientPort(Configuration conf) {
@@ -634,7 +631,6 @@ public abstract class BaseTest {
         //no point doing sanity checks when running tests.
         conf.setBoolean("hbase.table.sanity.checks", false);
         // set the server rpc controller and rpc scheduler factory, used to configure the
cluster
-        conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_SERVER_RPC_CONTROLLER_FACTORY);
         conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, DEFAULT_RPC_SCHEDULER_FACTORY);
         
         // override any defaults based on overrideProps


Mime
View raw message