phoenix-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rajeshb...@apache.org
Subject phoenix git commit: PHOENIX-2401 Split table is failing when the table is local indexed in secure cluster (Ted Yu)
Date Thu, 19 Nov 2015 11:19:23 GMT
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 0f6c32b22 -> 5d624a2b0


PHOENIX-2401 Split table is failing when the table is local indexed in secure cluster (Ted Yu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5d624a2b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5d624a2b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5d624a2b

Branch: refs/heads/4.x-HBase-0.98
Commit: 5d624a2b0675da9c44f64b535ad80b0f707bb152
Parents: 0f6c32b
Author: Rajeshbabu Chintaguntla <rajeshbabu@apache.org>
Authored: Thu Nov 19 16:49:33 2015 +0530
Committer: Rajeshbabu Chintaguntla <rajeshbabu@apache.org>
Committed: Thu Nov 19 16:49:33 2015 +0530

----------------------------------------------------------------------
 .../hbase/regionserver/LocalIndexSplitter.java  | 12 ++++++--
 .../UngroupedAggregateRegionObserver.java       | 32 +++++++++++++-------
 2 files changed, 31 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5d624a2b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index 66b9a87..5b149d8 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.sql.SQLException;
 import java.util.List;
 
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.phoenix.hbase.index.util.VersionUtil;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -68,7 +70,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
         if (SchemaUtil.isSystemTable(tableDesc.getName())) {
             return;
         }
-        RegionServerServices rss = ctx.getEnvironment().getRegionServerServices();
+        final RegionServerServices rss = ctx.getEnvironment().getRegionServerServices();
         if (tableDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) == null
                 || !Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(tableDesc
                         .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
@@ -107,7 +109,13 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                     return;
                 }
                 indexRegion.forceSplit(splitKey);
-                daughterRegions = st.stepsBeforePONR(rss, rss, false);
+                User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+                   @Override
+                   public Void run() throws Exception {                  
+                     daughterRegions = st.stepsBeforePONR(rss, rss, false);
+                     return null;
+                   }
+                 });
                 HRegionInfo copyOfParent = new HRegionInfo(indexRegion.getRegionInfo());
                 copyOfParent.setOffline(true);
                 copyOfParent.setSplit(true);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5d624a2b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 7316bb1..b3ff7e3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -28,6 +28,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.coprocessor.generated.PTableProtos;
@@ -637,11 +639,10 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     
     
     @Override
-    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion
l, HRegion r)
-            throws IOException {
-        HRegion region = e.getEnvironment().getRegion();
-        TableName table = region.getRegionInfo().getTable();
-        StatisticsCollector stats = null;
+    public void postSplit(final ObserverContext<RegionCoprocessorEnvironment> e, final
HRegion l,
+            final HRegion r) throws IOException {
+        final HRegion region = e.getEnvironment().getRegion();
+        final TableName table = region.getRegionInfo().getTable();
         try {
             boolean useCurrentTime = 
                     e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,

@@ -649,16 +650,25 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             // Provides a means of clients controlling their timestamps to not use current
time
             // when background tasks are updating stats. Instead we track the max timestamp
of
             // the cells and use that.
-            long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() :
StatisticsCollector.NO_TIMESTAMP;
-            stats = new StatisticsCollector(e.getEnvironment(), table.getNameAsString(),
clientTimeStamp);
-            stats.splitStats(region, l, r);
+            final long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime()
: StatisticsCollector.NO_TIMESTAMP;
+            User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 StatisticsCollector stats = new StatisticsCollector(e.getEnvironment(),
+                   table.getNameAsString(), clientTimeStamp);
+                 try {
+                   stats.splitStats(region, l, r);
+                   return null;
+                 } finally {
+                   if (stats != null) stats.close();
+                 }
+               }
+             });
         } catch (IOException ioe) { 
             if(logger.isWarnEnabled()) {
                 logger.warn("Error while collecting stats during split for " + table,ioe);
             }
-        } finally {
-            if (stats != null) stats.close();
-        }
+        } 
     }
 
     private static PTable deserializeTable(byte[] b) {


Mime
View raw message